2026-03-10T09:42:49.755 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-10T09:42:49.760 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T09:42:49.780 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988
branch: squid
description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic}
email: null
first_in_suite: false
flavor: default
job_id: '988'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 1
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: true
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_STRAY_DAEMON
    - CEPHADM_FAILED_DAEMON
    - CEPHADM_AGENT_DOWN
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
seed: 8043
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
targets:
  vm00.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFiiCOy+7P6S43EKg5DS0ff0Aconf7GcDpfM0HkfL7DoMTX42/MlvYkcRDBLIVXqLO+J+/pNfMf3xfPiL87lEnY=
  vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAYOoEuF1KGcjHgyyYBU+ZU0i12tZGe6c4Cn54oK/UItiOgaO3u/RV9zryQBdra4dO2jSLNfQtH0eZR+GuNo4/c=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
- cephadm.shell:
    mon.a:
    - ceph config set mgr mgr/cephadm/use_repo_digest false --force
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - radosgw-admin realm create --rgw-realm=r --default
    - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
    - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
    - radosgw-admin period update --rgw-realm=r --commit
    - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000
    - ceph orch apply rgw smpl
    - ceph osd pool create foo
    - rbd pool init foo
    - ceph orch apply iscsi foo u p
    - sleep 120
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
    - ceph config set global log_to_journald false --force
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
    - ceph orch ps
    - ceph versions
    - echo "wait for servicemap items w/ changing names to refresh"
    - sleep 60
    - ceph orch ps
    - ceph versions
    - ceph orch upgrade status
    - ceph health detail
    - ceph versions | jq -e '.overall | length == 1'
    - ceph versions | jq -e '.overall | keys' | grep $sha1
    - ceph orch ls | grep '^osd '
- cephadm.shell:
    mon.a:
    - ceph orch upgrade ls
    - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
    - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_01:00:38
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-10T09:42:49.780 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it
2026-03-10T09:42:49.781 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks
2026-03-10T09:42:49.781 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-10T09:42:49.781 INFO:teuthology.task.internal:Checking packages...
2026-03-10T09:42:49.781 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-10T09:42:49.781 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-10T09:42:49.781 INFO:teuthology.packaging:ref: None
2026-03-10T09:42:49.781 INFO:teuthology.packaging:tag: None
2026-03-10T09:42:49.781 INFO:teuthology.packaging:branch: squid
2026-03-10T09:42:49.781 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:42:49.781 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-10T09:42:50.517 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-10T09:42:50.518 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-10T09:42:50.518 INFO:teuthology.task.internal:no buildpackages task found
2026-03-10T09:42:50.518 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-10T09:42:50.519 INFO:teuthology.task.internal:Saving configuration
2026-03-10T09:42:50.524 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-10T09:42:50.524 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-10T09:42:50.530 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm00.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 09:41:36.130078', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:00', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFiiCOy+7P6S43EKg5DS0ff0Aconf7GcDpfM0HkfL7DoMTX42/MlvYkcRDBLIVXqLO+J+/pNfMf3xfPiL87lEnY='}
2026-03-10T09:42:50.536 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 09:41:36.129661', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAYOoEuF1KGcjHgyyYBU+ZU0i12tZGe6c4Cn54oK/UItiOgaO3u/RV9zryQBdra4dO2jSLNfQtH0eZR+GuNo4/c='}
2026-03-10T09:42:50.536 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-10T09:42:50.537 INFO:teuthology.task.internal:roles: ubuntu@vm00.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a']
2026-03-10T09:42:50.537 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b']
2026-03-10T09:42:50.537 INFO:teuthology.run_tasks:Running task console_log...
2026-03-10T09:42:50.543 DEBUG:teuthology.task.console_log:vm00 does not support IPMI; excluding
2026-03-10T09:42:50.547 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding
2026-03-10T09:42:50.547 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fb415a7e170>, signals=[15])
2026-03-10T09:42:50.547 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-10T09:42:50.548 INFO:teuthology.task.internal:Opening connections...
2026-03-10T09:42:50.548 DEBUG:teuthology.task.internal:connecting to ubuntu@vm00.local
2026-03-10T09:42:50.549 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T09:42:50.609 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local
2026-03-10T09:42:50.610 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T09:42:50.672 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-10T09:42:50.673 DEBUG:teuthology.orchestra.run.vm00:> uname -m
2026-03-10T09:42:50.708 INFO:teuthology.orchestra.run.vm00.stdout:x86_64
2026-03-10T09:42:50.709 DEBUG:teuthology.orchestra.run.vm00:> cat /etc/os-release
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:NAME="CentOS Stream"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:VERSION="9"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:ID="centos"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:ID_LIKE="rhel fedora"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:VERSION_ID="9"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:PLATFORM_ID="platform:el9"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:ANSI_COLOR="0;31"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:LOGO="fedora-logo-icon"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:HOME_URL="https://centos.org/"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T09:42:50.765 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T09:42:50.766 INFO:teuthology.lock.ops:Updating vm00.local on lock server
2026-03-10T09:42:50.770 DEBUG:teuthology.orchestra.run.vm03:> uname -m
2026-03-10T09:42:50.786 INFO:teuthology.orchestra.run.vm03.stdout:x86_64
2026-03-10T09:42:50.786 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T09:42:50.840 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T09:42:50.841 INFO:teuthology.lock.ops:Updating vm03.local on lock server
2026-03-10T09:42:50.845 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-10T09:42:50.847 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-10T09:42:50.848 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-10T09:42:50.848 DEBUG:teuthology.orchestra.run.vm00:> test '!' -e /home/ubuntu/cephtest
2026-03-10T09:42:50.850 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest
2026-03-10T09:42:50.895 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-10T09:42:50.896 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-10T09:42:50.896 DEBUG:teuthology.orchestra.run.vm00:> test -z $(ls -A /var/lib/ceph)
2026-03-10T09:42:50.905 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph)
2026-03-10T09:42:50.918 INFO:teuthology.orchestra.run.vm00.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T09:42:50.949 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T09:42:50.949 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-10T09:42:50.956 DEBUG:teuthology.orchestra.run.vm00:> test -e /ceph-qa-ready
2026-03-10T09:42:50.973 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T09:42:51.169 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready
2026-03-10T09:42:51.183 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T09:42:51.382 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-10T09:42:51.383 INFO:teuthology.task.internal:Creating test directory...
2026-03-10T09:42:51.383 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T09:42:51.385 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T09:42:51.400 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-10T09:42:51.401 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-10T09:42:51.402 INFO:teuthology.task.internal:Creating archive directory...
2026-03-10T09:42:51.402 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T09:42:51.440 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T09:42:51.457 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-10T09:42:51.458 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-10T09:42:51.458 DEBUG:teuthology.orchestra.run.vm00:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T09:42:51.509 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T09:42:51.509 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T09:42:51.522 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T09:42:51.522 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T09:42:51.552 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T09:42:51.573 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T09:42:51.581 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T09:42:51.586 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T09:42:51.595 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T09:42:51.596 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-10T09:42:51.597 INFO:teuthology.task.internal:Configuring sudo...
2026-03-10T09:42:51.597 DEBUG:teuthology.orchestra.run.vm00:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T09:42:51.624 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T09:42:51.660 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-10T09:42:51.663 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-10T09:42:51.663 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T09:42:51.691 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T09:42:51.718 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T09:42:51.766 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T09:42:51.822 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T09:42:51.822 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T09:42:51.878 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T09:42:51.903 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T09:42:51.958 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T09:42:51.958 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T09:42:52.017 DEBUG:teuthology.orchestra.run.vm00:> sudo service rsyslog restart
2026-03-10T09:42:52.018 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart
2026-03-10T09:42:52.042 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T09:42:52.082 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T09:42:52.399 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-10T09:42:52.401 INFO:teuthology.task.internal:Starting timer...
2026-03-10T09:42:52.401 INFO:teuthology.run_tasks:Running task pcp...
2026-03-10T09:42:52.403 INFO:teuthology.run_tasks:Running task selinux...
2026-03-10T09:42:52.405 INFO:teuthology.task.selinux:Excluding vm00: VMs are not yet supported
2026-03-10T09:42:52.405 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported
2026-03-10T09:42:52.405 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-10T09:42:52.406 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-10T09:42:52.406 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-10T09:42:52.406 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-10T09:42:52.407 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-10T09:42:52.407 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-10T09:42:52.409 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-10T09:42:53.142 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-10T09:42:53.147 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-10T09:42:53.147 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventorysbax_hn0 --limit vm00.local,vm03.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-10T09:45:34.217 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm00.local'), Remote(name='ubuntu@vm03.local')]
2026-03-10T09:45:34.218 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm00.local'
2026-03-10T09:45:34.218 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T09:45:34.288 DEBUG:teuthology.orchestra.run.vm00:> true
2026-03-10T09:45:34.373 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm00.local'
2026-03-10T09:45:34.373 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local'
2026-03-10T09:45:34.374 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T09:45:34.439 DEBUG:teuthology.orchestra.run.vm03:> true
2026-03-10T09:45:34.516 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local'
2026-03-10T09:45:34.516 INFO:teuthology.run_tasks:Running task clock...
2026-03-10T09:45:34.518 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-10T09:45:34.518 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T09:45:34.518 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T09:45:34.520 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T09:45:34.520 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T09:45:34.554 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T09:45:34.578 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T09:45:34.600 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T09:45:34.611 INFO:teuthology.orchestra.run.vm00.stderr:sudo: ntpd: command not found
2026-03-10T09:45:34.612 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T09:45:34.627 INFO:teuthology.orchestra.run.vm00.stdout:506 Cannot talk to daemon
2026-03-10T09:45:34.633 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found
2026-03-10T09:45:34.643 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon
2026-03-10T09:45:34.646 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T09:45:34.655 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T09:45:34.670 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T09:45:34.670 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T09:45:34.722 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-10T09:45:34.725 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found
2026-03-10T09:45:34.726 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T09:45:34.726 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-10T09:45:34.727 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T09:45:34.727 INFO:teuthology.orchestra.run.vm00.stdout:===============================================================================
2026-03-10T09:45:34.728 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-10T09:45:34.805 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': True}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON', 'CEPHADM_AGENT_DOWN'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'}
2026-03-10T09:45:34.806 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0
2026-03-10T09:45:34.806 INFO:tasks.cephadm:Cluster fsid is e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:45:34.806 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-10T09:45:34.806 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.100', 'mon.c': '[v2:192.168.123.100:3301,v1:192.168.123.100:6790]', 'mon.b': '192.168.123.103'}
2026-03-10T09:45:34.806 INFO:tasks.cephadm:First mon is mon.a on vm00
2026-03-10T09:45:34.806 INFO:tasks.cephadm:First mgr is y
2026-03-10T09:45:34.806 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-10T09:45:34.806 DEBUG:teuthology.orchestra.run.vm00:> sudo hostname $(hostname -s)
2026-03-10T09:45:34.848 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s)
2026-03-10T09:45:34.882 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)...
2026-03-10T09:45:34.882 DEBUG:teuthology.orchestra.run.vm00:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T09:45:35.145 INFO:teuthology.orchestra.run.vm00.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 09:45 /home/ubuntu/cephtest/cephadm
2026-03-10T09:45:35.145 DEBUG:teuthology.orchestra.run.vm03:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T09:45:35.233 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 09:45 /home/ubuntu/cephtest/cephadm
2026-03-10T09:45:35.234 DEBUG:teuthology.orchestra.run.vm00:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T09:45:35.256 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T09:45:35.285 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts...
2026-03-10T09:45:35.285 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-10T09:45:35.300 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-10T09:45:35.522 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T09:45:35.529 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout:{
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout: ]
2026-03-10T09:45:58.391 INFO:teuthology.orchestra.run.vm03.stdout:}
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout: "repo_digests": [
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout: ]
2026-03-10T09:45:58.744 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:45:58.774 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /etc/ceph
2026-03-10T09:45:58.812 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph
2026-03-10T09:45:58.846 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 777 /etc/ceph
2026-03-10T09:45:58.883 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph
2026-03-10T09:45:58.916 INFO:tasks.cephadm:Writing seed config...
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = True
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-10T09:45:58.917 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-10T09:45:58.917 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T09:45:58.917 DEBUG:teuthology.orchestra.run.vm00:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-10T09:45:58.947 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true
fsid = e2d4b2ee-1c65-11f1-bae0-b525704df8fa
mon election default strategy = 1

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = True

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-10T09:45:58.947 DEBUG:teuthology.orchestra.run.vm00:mon.a> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service
2026-03-10T09:45:58.989 DEBUG:teuthology.orchestra.run.vm00:mgr.y> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service
2026-03-10T09:45:59.032 INFO:tasks.cephadm:Bootstrapping...
2026-03-10T09:45:59.032 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.100 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-10T09:45:59.217 INFO:teuthology.orchestra.run.vm00.stderr:--------------------------------------------------------------------------------
2026-03-10T09:45:59.217 INFO:teuthology.orchestra.run.vm00.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', 'e2d4b2ee-1c65-11f1-bae0-b525704df8fa', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'y', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.100', '--skip-admin-label']
2026-03-10T09:45:59.239 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T09:45:59.242 INFO:teuthology.orchestra.run.vm00.stderr:Verifying podman|docker is present...
2026-03-10T09:45:59.265 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T09:45:59.268 INFO:teuthology.orchestra.run.vm00.stderr:Verifying lvm2 is present...
2026-03-10T09:45:59.268 INFO:teuthology.orchestra.run.vm00.stderr:Verifying time synchronization is in place...
2026-03-10T09:45:59.280 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T09:45:59.296 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive
2026-03-10T09:45:59.308 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled
2026-03-10T09:45:59.317 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active
2026-03-10T09:45:59.317 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running
2026-03-10T09:45:59.317 INFO:teuthology.orchestra.run.vm00.stderr:Repeating the final host check...
2026-03-10T09:45:59.350 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T09:45:59.354 INFO:teuthology.orchestra.run.vm00.stderr:podman (/bin/podman) version 5.8.0 is present
2026-03-10T09:45:59.354 INFO:teuthology.orchestra.run.vm00.stderr:systemctl is present
2026-03-10T09:45:59.354 INFO:teuthology.orchestra.run.vm00.stderr:lvcreate is present
2026-03-10T09:45:59.365 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T09:45:59.372 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive
2026-03-10T09:45:59.381 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled
2026-03-10T09:45:59.396 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active
2026-03-10T09:45:59.396 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running
2026-03-10T09:45:59.396 INFO:teuthology.orchestra.run.vm00.stderr:Host looks OK
2026-03-10T09:45:59.396 INFO:teuthology.orchestra.run.vm00.stderr:Cluster fsid: e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:45:59.397 INFO:teuthology.orchestra.run.vm00.stderr:Acquiring lock 140297611521088 on /run/cephadm/e2d4b2ee-1c65-11f1-bae0-b525704df8fa.lock
2026-03-10T09:45:59.397 INFO:teuthology.orchestra.run.vm00.stderr:Lock 140297611521088 acquired on /run/cephadm/e2d4b2ee-1c65-11f1-bae0-b525704df8fa.lock
2026-03-10T09:45:59.397 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 3300 ...
2026-03-10T09:45:59.397 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 6789 ...
2026-03-10T09:45:59.398 INFO:teuthology.orchestra.run.vm00.stderr:Base mon IP is 192.168.123.100, final addrv is [v2:192.168.123.100:3300,v1:192.168.123.100:6789]
2026-03-10T09:45:59.407 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.100 metric 100
2026-03-10T09:45:59.408 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.100 metric 100
2026-03-10T09:45:59.412 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium
2026-03-10T09:45:59.413 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-10T09:45:59.416 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-10T09:45:59.416 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 ::1/128 scope host
2026-03-10T09:45:59.416 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-10T09:45:59.416 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000
2026-03-10T09:45:59.416 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:0/64 scope link noprefixroute
2026-03-10T09:45:59.416 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-10T09:45:59.417 INFO:teuthology.orchestra.run.vm00.stderr:Mon IP `192.168.123.100` is in CIDR network `192.168.123.0/24`
2026-03-10T09:45:59.417 INFO:teuthology.orchestra.run.vm00.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-10T09:45:59.418 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T09:45:59.457 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0...
2026-03-10T09:46:00.683 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Getting image source signatures
2026-03-10T09:46:00.684 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a
2026-03-10T09:46:00.684 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b
2026-03-10T09:46:00.684 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c
2026-03-10T09:46:00.684 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513
2026-03-10T09:46:00.684 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2
2026-03-10T09:46:00.684 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-10T09:46:00.687 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Writing manifest to image destination
2026-03-10T09:46:00.691 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-10T09:46:00.835 INFO:teuthology.orchestra.run.vm00.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-10T09:46:00.865 INFO:teuthology.orchestra.run.vm00.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-10T09:46:00.865 INFO:teuthology.orchestra.run.vm00.stderr:Extracting ceph user uid/gid from container image...
2026-03-10T09:46:00.960 INFO:teuthology.orchestra.run.vm00.stderr:stat: 167 167
2026-03-10T09:46:00.984 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial keys...
2026-03-10T09:46:01.066 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQBZ6K9pfmr1AxAARZv4avw+kP9XbbQUheER4g==
2026-03-10T09:46:01.185 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQBZ6K9pBIwPCxAAeUwxJdvEeGgxke9GQBQffw==
2026-03-10T09:46:01.314 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQBZ6K9pwBaxEhAAIVtIcrIBnqjKGEtw7K/WZg==
2026-03-10T09:46:01.340 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial monmap...
2026-03-10T09:46:01.447 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T09:46:01.447 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus
2026-03-10T09:46:01.447 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:46:01.447 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T09:46:01.477 INFO:teuthology.orchestra.run.vm00.stderr:monmaptool for a [v2:192.168.123.100:3300,v1:192.168.123.100:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T09:46:01.477 INFO:teuthology.orchestra.run.vm00.stderr:setting min_mon_release = octopus
2026-03-10T09:46:01.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: set fsid to e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:46:01.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T09:46:01.477 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T09:46:01.477 INFO:teuthology.orchestra.run.vm00.stderr:Creating mon...
2026-03-10T09:46:01.631 INFO:teuthology.orchestra.run.vm00.stderr:create mon.a on
2026-03-10T09:46:01.848 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-10T09:46:02.017 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa.target → /etc/systemd/system/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa.target.
2026-03-10T09:46:02.017 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa.target → /etc/systemd/system/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa.target.
2026-03-10T09:46:02.352 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service: Unit ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service not loaded.
2026-03-10T09:46:02.364 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa.target.wants/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service → /etc/systemd/system/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@.service.
2026-03-10T09:46:02.739 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T09:46:02.739 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available
2026-03-10T09:46:02.739 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon to start...
2026-03-10T09:46:02.739 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon...
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: cluster:
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: id: e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: health: HEALTH_OK
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: services:
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon: 1 daemons, quorum a (age 0.182711s)
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr: no daemons active
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: data:
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: objects: 0 objects, 0 B
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pgs:
2026-03-10T09:46:02.964 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:03.762 INFO:teuthology.orchestra.run.vm00.stderr:mon is available
2026-03-10T09:46:03.762 INFO:teuthology.orchestra.run.vm00.stderr:Assimilating anything we can from ceph.conf...
2026-03-10T09:46:03.977 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:03.977 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global]
2026-03-10T09:46:03.977 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:46:03.977 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.100:3300,v1:192.168.123.100:6789]
2026-03-10T09:46:03.977 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-10T09:46:03.977 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr]
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/cephadm/use_agent = True
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd]
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000
2026-03-10T09:46:03.978 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true
2026-03-10T09:46:04.012 INFO:teuthology.orchestra.run.vm00.stderr:Generating new minimal ceph.conf...
2026-03-10T09:46:04.298 INFO:teuthology.orchestra.run.vm00.stderr:Restarting the monitor...
2026-03-10T09:46:04.930 INFO:teuthology.orchestra.run.vm00.stderr:Setting mon public_network to 192.168.123.0/24
2026-03-10T09:46:05.149 INFO:teuthology.orchestra.run.vm00.stderr:Wrote config to /etc/ceph/ceph.conf
2026-03-10T09:46:05.149 INFO:teuthology.orchestra.run.vm00.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-10T09:46:05.149 INFO:teuthology.orchestra.run.vm00.stderr:Creating mgr...
2026-03-10T09:46:05.149 INFO:teuthology.orchestra.run.vm00.stderr:Verifying port 9283 ...
2026-03-10T09:46:05.315 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Unit ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service not loaded.
2026-03-10T09:46:05.328 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa.target.wants/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service → /etc/systemd/system/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@.service.
2026-03-10T09:46:05.908 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T09:46:05.908 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available
2026-03-10T09:46:05.908 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T09:46:05.908 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[9283]>. firewalld.service is not available
2026-03-10T09:46:05.908 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr to start...
2026-03-10T09:46:05.908 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr...
2026-03-10T09:46:06.214 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph:
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "e2d4b2ee-1c65-11f1-bae0-b525704df8fa",
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK",
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {},
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": []
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "a"
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 1,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy",
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [],
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [],
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat",
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs",
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful"
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ],
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {}
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": {
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T09:46:02.779899+0000",
2026-03-10T09:46:06.215 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {}
2026-03-10T09:46:06.216 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: },
2026-03-10T09:46:06.216 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {}
2026-03-10T09:46:06.216 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }
2026-03-10T09:46:06.242 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (1/15)...
2026-03-10T09:46:08.594 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (2/15)... 
2026-03-10T09:46:10.950 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (3/15)... 
2026-03-10T09:46:13.258 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "e2d4b2ee-1c65-11f1-bae0-b525704df8fa", 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "a" 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 8, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-10T09:46:13.259 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-10T09:46:13.260 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-10T09:46:13.260 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-10T09:46:13.260 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T09:46:13.261 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T09:46:02.779899+0000", 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T09:46:13.261 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T09:46:13.299 INFO:teuthology.orchestra.run.vm00.stderr:mgr is available 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global] 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr] 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T09:46:13.588 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd] 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-10T09:46:13.588 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-10T09:46:13.619 INFO:teuthology.orchestra.run.vm00.stderr:Enabling cephadm module... 2026-03-10T09:46:15.593 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T09:46:15.593 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-10T09:46:15.593 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-10T09:46:15.593 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-10T09:46:15.593 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-10T09:46:15.593 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T09:46:15.681 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart... 2026-03-10T09:46:15.681 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 5... 2026-03-10T09:46:16.028 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:16.027+0000 7f83185df000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:46:16.535 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:16.451+0000 7f83185df000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:46:16.789 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:16.643+0000 7f83185df000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:46:16.789 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:16.708+0000 7f83185df000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:46:17.039 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:16.899+0000 7f83185df000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:46:17.806 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:17.547+0000 7f83185df000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:46:17.806 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:17.754+0000 7f83185df000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:46:18.056 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:17.843+0000 7f83185df000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:46:18.056 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:17.905+0000 7f83185df000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:46:18.056 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:17 vm00 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:17.971+0000 7f83185df000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:46:18.056 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:18.038+0000 7f83185df000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:46:18.558 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:18.358+0000 7f83185df000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:46:18.558 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:18.435+0000 7f83185df000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:46:19.147 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.029+0000 7f83185df000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:46:19.148 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.094+0000 7f83185df000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:46:19.400 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.160+0000 7f83185df000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:46:19.400 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.289+0000 7f83185df000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:46:19.400 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.369+0000 7f83185df000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:46:19.651 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.476+0000 7f83185df000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:46:19.651 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.572+0000 7f83185df000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:46:19.903 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.898+0000 7f83185df000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:46:20.154 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:19.961+0000 7f83185df000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:46:20.661 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:46:20] ENGINE Bus STARTING 2026-03-10T09:46:20.913 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:46:20] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:46:20.914 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:20 vm00 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:46:20] ENGINE Bus STARTED 2026-03-10T09:46:21.092 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T09:46:21.092 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-10T09:46:21.092 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "initialized": true 2026-03-10T09:46:21.092 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T09:46:21.131 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 5 is available 2026-03-10T09:46:21.131 INFO:teuthology.orchestra.run.vm00.stderr:Setting orchestrator backend to cephadm... 2026-03-10T09:46:21.717 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: value unchanged 2026-03-10T09:46:21.746 INFO:teuthology.orchestra.run.vm00.stderr:Generating ssh key... 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: Generating public/private rsa key pair. 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: Your identification has been saved in /tmp/tmppyjxg59y/key. 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: Your public key has been saved in /tmp/tmppyjxg59y/key.pub. 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: The key fingerprint is: 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: SHA256:D3g+HstZH2vVhfvP1Z5yOavO9IsQUh5NGRZlhh3xkHk ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: The key's randomart image is: 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: +---[RSA 3072]----+ 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | =OOo| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | +o=oE| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | o . o.| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | . o . . .| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | . S. o o.| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | o o. . 
o o| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | + o..o .+| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | o * .=+o*+| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: | = .+=+=O| 2026-03-10T09:46:22.188 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: +----[SHA256]-----+ 2026-03-10T09:46:22.451 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCfd3myUt3TUrOTfqpOmPK7pGKCqBi9nq137mBQLasbtXoUK5BLxV6N73cOi9DUmGzLYpwT3FQHfifIA/O2KCZvdHPU9iXlVsBHKUOjOlnvqtYjMwjgsCogB9kgAcMwLTvtBMZiYm0fGnJIZTbc8lBVn08SGkPxQyPGLPeUk3HF8VqJvYPpVFOeVkS3R00XjeEaLBw0jSYPMQXNammOnRaPFxSR2AfyZz3FB/AExXTGw2JOPzvswMI3AU7yONryaqJSR9nn9akWOgt0J4F+OF0PSZTx0QaU4p6GOCfk0RRLn/DdL5sfuyfos0+lMpOKWmbpFCRXI6mAYbU8Doyr2jlW2yO3qObx30G6CTRx04+BrG8eijyIQYhrpssd31R6jwDvMaioeiyNfCESNsNuOyHE5cOGMBMCrZw0fqhJItegJbOcuvpjF+vpw18ubFHqByHADT46GDa6Js3L44imiRZAnXUiekobKpcy71lq6un6DbdGKORnxIX6tKKT3XUZjdk= ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:22.484 INFO:teuthology.orchestra.run.vm00.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-10T09:46:22.484 INFO:teuthology.orchestra.run.vm00.stderr:Adding key to root@localhost authorized_keys... 2026-03-10T09:46:22.484 INFO:teuthology.orchestra.run.vm00.stderr:Adding host vm00... 2026-03-10T09:46:23.481 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Added host 'vm00' with addr '192.168.123.100' 2026-03-10T09:46:23.514 INFO:teuthology.orchestra.run.vm00.stderr:Deploying unmanaged mon service... 2026-03-10T09:46:23.888 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-10T09:46:23.922 INFO:teuthology.orchestra.run.vm00.stderr:Deploying unmanaged mgr service... 2026-03-10T09:46:24.219 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mgr update... 2026-03-10T09:46:24.910 INFO:teuthology.orchestra.run.vm00.stderr:Enabling the dashboard module... 2026-03-10T09:46:26.028 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ignoring --setuser ceph since I am not root 2026-03-10T09:46:26.029 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ignoring --setgroup ceph since I am not root 2026-03-10T09:46:26.277 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:26.121+0000 7f3bb749c000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:46:26.454 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T09:46:26.454 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-10T09:46:26.454 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-10T09:46:26.454 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-10T09:46:26.454 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-10T09:46:26.454 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T09:46:26.491 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart... 
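At this point bootstrap has generated the cluster SSH identity, written the public half to /home/ubuntu/cephtest/ceph.pub, and registered vm00. Done by hand, the same key distribution and host registration use `ceph cephadm get-pub-key` and `ceph orch host add`, which is the flow the task repeats for vm03 further down. A sketch, assuming admin CLI access; the temp-file handling and the ssh-copy-id step are illustrative:

import subprocess
import tempfile

def ceph(*args):
    """Run a ceph CLI command and return its stdout (assumes admin keyring access)."""
    return subprocess.run(["ceph", *args],
                          capture_output=True, text=True, check=True).stdout

# Fetch the cluster SSH public key generated during bootstrap.
pub_key = ceph("cephadm", "get-pub-key")

# Install it for root on the new host, then register the host; the
# hostname and address are the ones this run uses for its second node.
with tempfile.NamedTemporaryFile("w", suffix=".pub") as f:
    f.write(pub_key)
    f.flush()
    subprocess.run(["ssh-copy-id", "-f", "-i", f.name, "root@vm03"], check=True)
print(ceph("orch", "host", "add", "vm03", "192.168.123.103"))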
2026-03-10T09:46:26.491 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 9... 2026-03-10T09:46:26.536 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:26.375+0000 7f3bb749c000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:46:27.032 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:27.017+0000 7f3bb749c000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:46:27.564 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:27.397+0000 7f3bb749c000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:46:27.819 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:27.597+0000 7f3bb749c000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:46:27.819 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:27.659+0000 7f3bb749c000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:46:28.071 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:27.864+0000 7f3bb749c000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:46:28.573 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:28.474+0000 7f3bb749c000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:46:28.827 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:28.673+0000 7f3bb749c000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:46:28.827 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:28.733+0000 7f3bb749c000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:46:28.827 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:28.795+0000 7f3bb749c000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:46:29.080 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:28.867+0000 7f3bb749c000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:46:29.081 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:28.929+0000 7f3bb749c000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:46:29.334 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:29.262+0000 7f3bb749c000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:46:29.334 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:29.333+0000 7f3bb749c000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:46:30.090 
INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:29.918+0000 7f3bb749c000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:46:30.090 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:29.985+0000 7f3bb749c000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:46:30.090 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.052+0000 7f3bb749c000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:46:30.340 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.186+0000 7f3bb749c000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:46:30.340 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.250+0000 7f3bb749c000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:46:30.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.356+0000 7f3bb749c000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:46:30.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.447+0000 7f3bb749c000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:46:30.842 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.777+0000 7f3bb749c000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:46:30.843 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:46:30.842+0000 7f3bb749c000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:46:32.320 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T09:46:32.321 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 2026-03-10T09:46:32.321 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "initialized": true 2026-03-10T09:46:32.321 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T09:46:32.395 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 9 is available 2026-03-10T09:46:32.395 INFO:teuthology.orchestra.run.vm00.stderr:Generating a dashboard self-signed certificate... 2026-03-10T09:46:32.715 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:32 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:46:32] ENGINE Bus STARTING 2026-03-10T09:46:32.758 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Self-signed certificate created 2026-03-10T09:46:32.794 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial admin user... 
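Both dashboard steps logged here have plain CLI equivalents in the dashboard module. A hedged sketch: `create-self-signed-cert` matches the log above, while `ac-user-create` reads the password from a file in recent releases and its exact flags vary by version; the file path is illustrative and the password is the one bootstrap prints below:

import pathlib
import subprocess

def ceph(*args):
    return subprocess.run(["ceph", *args],
                          capture_output=True, text=True, check=True).stdout

# Self-signed TLS certificate for the dashboard, as in the log above.
ceph("dashboard", "create-self-signed-cert")

# Initial admin account with the administrator role.
pw_file = pathlib.Path("/tmp/dashboard-pw")   # illustrative location
pw_file.write_text("wk551ql57f")              # password bootstrap prints below
print(ceph("dashboard", "ac-user-create", "admin", "-i", str(pw_file), "administrator"))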
2026-03-10T09:46:32.967 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:32 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:46:32] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:46:32.967 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:32 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:46:32] ENGINE Bus STARTED 2026-03-10T09:46:33.564 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$gu4XK26w26cec/OcWuxrh.VIJTvTgBO.pXtc8QToEmQyM.TRibuXO", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773135993, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-10T09:46:33.594 INFO:teuthology.orchestra.run.vm00.stderr:Fetching dashboard port number... 2026-03-10T09:46:33.855 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 8443 2026-03-10T09:46:33.968 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-10T09:46:33.968 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[8443]>. firewalld.service is not available 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr:Ceph Dashboard is now available at: 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr: URL: https://vm00.local:8443/ 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr: User: admin 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr: Password: wk551ql57f 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:33.970 INFO:teuthology.orchestra.run.vm00.stderr:Enabling autotune for osd_memory_target 2026-03-10T09:46:34.618 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status 2026-03-10T09:46:34.661 INFO:teuthology.orchestra.run.vm00.stderr:You can access the Ceph CLI with: 2026-03-10T09:46:34.661 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:34.661 INFO:teuthology.orchestra.run.vm00.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-10T09:46:34.661 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:34.661 INFO:teuthology.orchestra.run.vm00.stderr:Please consider enabling telemetry to help improve Ceph: 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr: ceph telemetry on 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr:For more information see: 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/ 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T09:46:34.662 INFO:teuthology.orchestra.run.vm00.stderr:Bootstrap complete. 2026-03-10T09:46:34.696 INFO:tasks.cephadm:Fetching config... 2026-03-10T09:46:34.696 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T09:46:34.696 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-10T09:46:34.722 INFO:tasks.cephadm:Fetching client.admin keyring... 
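With bootstrap complete, the task collects the cluster artifacts (ceph.conf, the admin keyring, the mon keyring, the SSH public key) by streaming each file through dd over its SSH channel, as the commands above and below show. The same collection, sketched with plain ssh; the hostname, destination directory, and the file list trimmed to two entries are illustrative:

import pathlib
import subprocess

BOOTSTRAP_HOST = "vm00"            # illustrative
DEST = pathlib.Path("artifacts")   # illustrative
DEST.mkdir(exist_ok=True)

# The same dd-over-ssh trick the task uses; sudo is only needed for
# root-owned files like the mon keyring.
FILES = {
    "ceph.conf": "dd if=/etc/ceph/ceph.conf of=/dev/stdout",
    "ceph.client.admin.keyring": "dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout",
}

for name, cmd in FILES.items():
    out = subprocess.run(["ssh", BOOTSTRAP_HOST, cmd],
                         capture_output=True, check=True).stdout
    (DEST / name).write_bytes(out)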
2026-03-10T09:46:34.722 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T09:46:34.722 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-10T09:46:34.790 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-10T09:46:34.790 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T09:46:34.790 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/keyring of=/dev/stdout 2026-03-10T09:46:34.929 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-10T09:46:34.929 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T09:46:34.929 DEBUG:teuthology.orchestra.run.vm00:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-10T09:46:35.002 INFO:tasks.cephadm:Installing pub ssh key for root users... 2026-03-10T09:46:35.003 DEBUG:teuthology.orchestra.run.vm00:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCfd3myUt3TUrOTfqpOmPK7pGKCqBi9nq137mBQLasbtXoUK5BLxV6N73cOi9DUmGzLYpwT3FQHfifIA/O2KCZvdHPU9iXlVsBHKUOjOlnvqtYjMwjgsCogB9kgAcMwLTvtBMZiYm0fGnJIZTbc8lBVn08SGkPxQyPGLPeUk3HF8VqJvYPpVFOeVkS3R00XjeEaLBw0jSYPMQXNammOnRaPFxSR2AfyZz3FB/AExXTGw2JOPzvswMI3AU7yONryaqJSR9nn9akWOgt0J4F+OF0PSZTx0QaU4p6GOCfk0RRLn/DdL5sfuyfos0+lMpOKWmbpFCRXI6mAYbU8Doyr2jlW2yO3qObx30G6CTRx04+BrG8eijyIQYhrpssd31R6jwDvMaioeiyNfCESNsNuOyHE5cOGMBMCrZw0fqhJItegJbOcuvpjF+vpw18ubFHqByHADT46GDa6Js3L44imiRZAnXUiekobKpcy71lq6un6DbdGKORnxIX6tKKT3XUZjdk= ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T09:46:35.170 INFO:teuthology.orchestra.run.vm00.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCfd3myUt3TUrOTfqpOmPK7pGKCqBi9nq137mBQLasbtXoUK5BLxV6N73cOi9DUmGzLYpwT3FQHfifIA/O2KCZvdHPU9iXlVsBHKUOjOlnvqtYjMwjgsCogB9kgAcMwLTvtBMZiYm0fGnJIZTbc8lBVn08SGkPxQyPGLPeUk3HF8VqJvYPpVFOeVkS3R00XjeEaLBw0jSYPMQXNammOnRaPFxSR2AfyZz3FB/AExXTGw2JOPzvswMI3AU7yONryaqJSR9nn9akWOgt0J4F+OF0PSZTx0QaU4p6GOCfk0RRLn/DdL5sfuyfos0+lMpOKWmbpFCRXI6mAYbU8Doyr2jlW2yO3qObx30G6CTRx04+BrG8eijyIQYhrpssd31R6jwDvMaioeiyNfCESNsNuOyHE5cOGMBMCrZw0fqhJItegJbOcuvpjF+vpw18ubFHqByHADT46GDa6Js3L44imiRZAnXUiekobKpcy71lq6un6DbdGKORnxIX6tKKT3XUZjdk= ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:35.185 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCfd3myUt3TUrOTfqpOmPK7pGKCqBi9nq137mBQLasbtXoUK5BLxV6N73cOi9DUmGzLYpwT3FQHfifIA/O2KCZvdHPU9iXlVsBHKUOjOlnvqtYjMwjgsCogB9kgAcMwLTvtBMZiYm0fGnJIZTbc8lBVn08SGkPxQyPGLPeUk3HF8VqJvYPpVFOeVkS3R00XjeEaLBw0jSYPMQXNammOnRaPFxSR2AfyZz3FB/AExXTGw2JOPzvswMI3AU7yONryaqJSR9nn9akWOgt0J4F+OF0PSZTx0QaU4p6GOCfk0RRLn/DdL5sfuyfos0+lMpOKWmbpFCRXI6mAYbU8Doyr2jlW2yO3qObx30G6CTRx04+BrG8eijyIQYhrpssd31R6jwDvMaioeiyNfCESNsNuOyHE5cOGMBMCrZw0fqhJItegJbOcuvpjF+vpw18ubFHqByHADT46GDa6Js3L44imiRZAnXUiekobKpcy71lq6un6DbdGKORnxIX6tKKT3XUZjdk= ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T09:46:35.224 INFO:teuthology.orchestra.run.vm03.stdout:ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCfd3myUt3TUrOTfqpOmPK7pGKCqBi9nq137mBQLasbtXoUK5BLxV6N73cOi9DUmGzLYpwT3FQHfifIA/O2KCZvdHPU9iXlVsBHKUOjOlnvqtYjMwjgsCogB9kgAcMwLTvtBMZiYm0fGnJIZTbc8lBVn08SGkPxQyPGLPeUk3HF8VqJvYPpVFOeVkS3R00XjeEaLBw0jSYPMQXNammOnRaPFxSR2AfyZz3FB/AExXTGw2JOPzvswMI3AU7yONryaqJSR9nn9akWOgt0J4F+OF0PSZTx0QaU4p6GOCfk0RRLn/DdL5sfuyfos0+lMpOKWmbpFCRXI6mAYbU8Doyr2jlW2yO3qObx30G6CTRx04+BrG8eijyIQYhrpssd31R6jwDvMaioeiyNfCESNsNuOyHE5cOGMBMCrZw0fqhJItegJbOcuvpjF+vpw18ubFHqByHADT46GDa6Js3L44imiRZAnXUiekobKpcy71lq6un6DbdGKORnxIX6tKKT3XUZjdk= ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:35.236 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-10T09:46:35.981 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-10T09:46:35.981 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-10T09:46:36.018 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:35 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:46:35] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:46:37.007 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm03 2026-03-10T09:46:37.007 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T09:46:37.007 DEBUG:teuthology.orchestra.run.vm03:> dd of=/etc/ceph/ceph.conf 2026-03-10T09:46:37.025 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T09:46:37.025 DEBUG:teuthology.orchestra.run.vm03:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:46:37.083 INFO:tasks.cephadm:Adding host vm03 to orchestrator... 
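The vm03 registration that follows is verified with `ceph orch host ls --format=json`; the task checks the JSON shown below for both hostnames. A minimal sketch of that verification, using only the fields visible in the output (hostname, addr, labels, status):

import json
import subprocess

def orch_hosts():
    out = subprocess.run(
        ["ceph", "orch", "host", "ls", "--format=json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return {h["hostname"]: h["addr"] for h in json.loads(out)}

hosts = orch_hosts()
assert {"vm00", "vm03"} <= hosts.keys(), f"missing hosts: {hosts}"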
2026-03-10T09:46:37.083 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch host add vm03 2026-03-10T09:46:37.300 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:37 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:46:37] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:46:37.811 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:37 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:46:37] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:46:38.627 INFO:teuthology.orchestra.run.vm00.stdout:Added host 'vm03' with addr '192.168.123.103' 2026-03-10T09:46:38.688 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch host ls --format=json 2026-03-10T09:46:39.306 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:46:39.306 INFO:teuthology.orchestra.run.vm00.stdout:[{"addr": "192.168.123.100", "hostname": "vm00", "labels": [], "status": ""}, {"addr": "192.168.123.103", "hostname": "vm03", "labels": [], "status": ""}] 2026-03-10T09:46:39.368 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-10T09:46:39.368 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd crush tunables default 2026-03-10T09:46:40.947 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:40 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:40.960 INFO:teuthology.orchestra.run.vm00.stderr:adjusted tunables profile to default 2026-03-10T09:46:41.043 INFO:tasks.cephadm:Adding mon.a on vm00 2026-03-10T09:46:41.043 INFO:tasks.cephadm:Adding mon.c on vm00 2026-03-10T09:46:41.043 INFO:tasks.cephadm:Adding mon.b on vm03 2026-03-10T09:46:41.043 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch apply mon '3;vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm03:192.168.123.103=b' 2026-03-10T09:46:41.448 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:41] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:46:41.634 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mon update... 2026-03-10T09:46:41.699 DEBUG:teuthology.orchestra.run.vm00:mon.c> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service 2026-03-10T09:46:41.701 DEBUG:teuthology.orchestra.run.vm03:mon.b> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.b.service 2026-03-10T09:46:41.703 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
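The `ceph orch apply mon` spec above packs a count and per-daemon host:address=name entries into one string; the run then polls `ceph mon dump -f json` (its output appears below) until three mons are in the monmap. A sketch of that wait, keyed on the mons array in the dump; the timeout and poll interval are illustrative:

import json
import subprocess
import time

def wait_for_mons(want=3, timeout=300):
    """Poll `ceph mon dump -f json` until the monmap holds `want` mons."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        dump = json.loads(subprocess.run(
            ["ceph", "mon", "dump", "-f", "json"],
            capture_output=True, text=True, check=True,
        ).stdout)
        if len(dump["mons"]) >= want:
            return dump
        time.sleep(5)
    raise TimeoutError(f"monmap never reached {want} mons")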
2026-03-10T09:46:41.703 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph mon dump -f json 2026-03-10T09:46:41.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:41] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:46:41.724 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:41] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1427833686' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", 
"entity": "mon."}]: dispatch 2026-03-10T09:46:42.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:46:42.352 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T09:46:42.352 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"e2d4b2ee-1c65-11f1-bae0-b525704df8fa","modified":"2026-03-10T09:46:01.447541Z","created":"2026-03-10T09:46:01.447541Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T09:46:42.356 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1 2026-03-10T09:46:42.737 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.row_cache: None 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.wal_filter: None 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.preserve_deletes: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.two_write_queues: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.atomic_flush: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.log_readahead_size: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 
vm00 ceph-mon[56720]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_background_jobs: 2 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_background_compactions: -1 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_subcompactions: 1 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_open_files: -1 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_background_flushes: -1 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Compression algorithms supported: 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kZSTD supported: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kXpressCompression 
supported: 0 2026-03-10T09:46:42.988 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kLZ4Compression supported: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kBZip2Compression supported: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kZlibCompression supported: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: kSnappyCompression supported: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000003 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.merge_operator: 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_filter: None 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x560a91db7d00) 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: index_type: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_index_type: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: index_shortening: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: hash_index_allow_collision: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: checksum: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: no_block_cache: 0 
2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache: 0x560a91e22170 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_options: 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: capacity : 536870912 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: num_shard_bits : 4 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: strict_capacity_limit : 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_compressed: (nil) 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: persistent_cache: (nil) 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size: 4096 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size_deviation: 10 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_restart_interval: 16 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: index_block_restart_interval: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: metadata_block_size: 4096 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: partition_filters: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: use_delta_encoding: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: whole_key_filtering: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: verify_compression: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: format_version: 4 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_index_compression: 1 2026-03-10T09:46:42.989 INFO:journalctl@ceph.mon.c.vm00.stdout: block_align: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression: NoCompression 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.num_levels: 7 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 
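The BinnedLRUCache and write-buffer settings printed above fully determine the mon's RocksDB cache geometry. A minimal sketch of the arithmetic in Python, using only values copied from the dump; nothing here comes from outside the log:

# Values copied from the block_cache_options dump above.
capacity = 536_870_912        # bytes (512 MiB)
num_shard_bits = 4

shards = 1 << num_shard_bits  # the cache is split into 2**num_shard_bits shards
per_shard = capacity // shards

print(f"{shards} shards, {per_shard / 2**20:.0f} MiB each "
      f"({capacity / 2**20:.0f} MiB total)")
# -> 16 shards, 32 MiB each (512 MiB total)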
2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: 
Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.arena_block_size: 4194304 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 
ceph-mon[56720]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T09:46:42.990 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.table_properties_collectors: 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.inplace_update_support: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.bloom_locality: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.max_successive_merges: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.ttl: 2592000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.enable_blob_files: false 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.min_blob_size: 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000003 succeeded,manifest_file_number is 3, next_file_number is 5, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 
0,min_log_number_to_keep is 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/version_set.cc:4083] Creating manifest 7 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136002747415, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #4 mode 2 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136002747992, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1540, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773136002, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "8d22c9c5-d480-4ae9-92e8-040457166903", "db_session_id": "8MC2D3K5NR07JS2608D2"}} 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/version_set.cc:4083] Creating manifest 9 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136002749640, "job": 1, "event": "recovery_finished"} 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x560a91e08700 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: DB pointer 0x560a91e7c000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c does not exist in monmap, will attempt to join an existing cluster 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
09:46:42 vm00 ceph-mon[56720]: using public_addrv [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS ------- 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: rocksdb: [db/db_impl/db_impl.cc:903] 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: ** DB Stats ** 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: L0 1/0 1.50 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Sum 1/0 1.50 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: 
AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T09:46:42.991 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative compaction: 0.00 GB write, 0.09 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval compaction: 0.00 GB write, 0.09 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: L0 1/0 1.50 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Sum 1/0 1.50 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(L0 Files): cumulative 0, 
interval 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative compaction: 0.00 GB write, 0.09 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: starting mon.c rank -1 at public addrs [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] at bind addrs [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(???) e0 preinit fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).mds e1 new map 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).mds e1 print_map 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: e1 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: legacy client fscid: -1 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout: No filesystems configured 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-10T09:46:42.992 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 
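mon.c starts at rank -1 and synchronizes because it is not yet in the monmap that vm03 dumped earlier (epoch 1, a single mon). A minimal sketch, assuming that JSON was saved to monmap.json (hypothetical path), of checking the fields that matter here:

import json

with open("monmap.json") as f:      # hypothetical path for the dump above
    monmap = json.load(f)

# election_strategy 1 is the "classic" mon election strategy.
print("epoch:", monmap["epoch"], "strategy:", monmap["election_strategy"])

# Only mon.a is in the map at this point, which is why mon.c logs
# "does not exist in monmap, will attempt to join an existing cluster".
names = [m["name"] for m in monmap["mons"]]
print("mons:", names, "quorum ranks:", monmap["quorum"])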
2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e4 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mkfs e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: monmap e1: 1 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]} 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: fsmap 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: osdmap e1: 0 total, 0 up, 0 in 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e1: no daemons active 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1444856539' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: monmap e1: 1 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]} 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: fsmap 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: osdmap e1: 0 total, 0 up, 0 in 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e1: no daemons active 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2203278558' entity='client.admin' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1257390815' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/23986560' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Activating manager daemon y 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e2: y(active, starting, since 0.00396863s) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Manager daemon y is now available 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3589579837' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14100 192.168.123.100:0/2589269224' entity='mgr.y' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e3: y(active, since 1.00831s) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e4: y(active, since 2s) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/199471261' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/631920274' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/631920274' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1778216310' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1778216310' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e5: y(active, since 4s) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3785334489' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Active manager daemon y restarted 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Activating manager daemon y 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: osdmap e2: 0 total, 0 up, 0 in 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e6: y(active, starting, since 0.0578456s) 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Manager daemon y is now available 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "config 
dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.993 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: [10/Mar/2026:09:46:20] ENGINE Bus STARTING 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: [10/Mar/2026:09:46:20] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: [10/Mar/2026:09:46:20] ENGINE Bus STARTED 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e7: y(active, since 1.06561s) 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 
ceph-mon[56720]: Generating ssh key... 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e8: y(active, since 2s) 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Deploying cephadm binary to vm00 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Added host vm00 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Saving service mon spec with placement count:5 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Saving service mgr spec with placement count:2 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1581649846' entity='client.admin' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1543038865' entity='client.admin' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/1349421902' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14120 192.168.123.100:0/2710851358' entity='mgr.y' 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1349421902' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e9: y(active, since 5s) 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4202137807' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Active manager daemon y restarted 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Activating manager daemon y 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: osdmap e3: 0 total, 0 up, 0 in 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e10: y(active, starting, since 0.455492s) 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:46:42.994 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Manager daemon y is now available 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]': finished 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e11: y(active, since 1.46401s) 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: [10/Mar/2026:09:46:32] ENGINE Bus STARTING 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: [10/Mar/2026:09:46:32] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: [10/Mar/2026:09:46:32] ENGINE Bus STARTED 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Deploying daemon agent.vm00 on vm00 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e12: y(active, since 2s) 
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3426471752' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2555414577' entity='client.admin'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1167005457' entity='client.admin'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mgrmap e13: y(active, since 6s)
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Deploying cephadm binary to vm03
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Added host vm03
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.995 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]': finished
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1427833686' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1427833686' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing).paxosservice(auth 1..5) refresh upgraded, format 0 -> 3
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expand map: {default=false}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta from 'false' to 'false'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expanded map: {default=false}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expand map: {default=info}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta from 'info' to 'info'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expanded map: {default=info}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expand map: {default=daemon}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta from 'daemon' to 'daemon'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expanded map: {default=daemon}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expand map: {default=debug}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta from 'debug' to 'debug'
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: expand_channel_meta expanded map: {default=debug}
2026-03-10T09:46:42.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:42 vm00 ceph-mon[56720]: mon.c@-1(synchronizing) e1 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file
2026-03-10T09:46:43.433 INFO:tasks.cephadm:Waiting for 3 mons in monmap...
2026-03-10T09:46:43.433 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph mon dump -f json
2026-03-10T09:46:44.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:44 vm03 ceph-mon[50536]: mon.b@-1(synchronizing).paxosservice(auth 1..5) refresh upgraded, format 0 -> 3
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: Deploying daemon mon.b on vm03
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: mon.a calling monitor election
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: mon.c calling monitor election
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:47.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: mon.a is new leader, mons a,c in quorum (ranks 0,1)
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: monmap e2: 2 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: fsmap
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: mgrmap e13: y(active, since 16s)
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: overall HEALTH_OK
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: Deploying daemon mon.b on vm03
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: mon.a calling monitor election
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: mon.c calling monitor election
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: mon.a is new leader, mons a,c in quorum (ranks 0,1)
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: monmap e2: 2 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: fsmap
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:48.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: mgrmap e13: y(active, since 16s)
2026-03-10T09:46:48.114 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: overall HEALTH_OK
2026-03-10T09:46:48.114 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:48.114 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:48.114 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:48.114 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:53.414 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:53] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:46:53.414 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:46:53] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:46:53.522 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T09:46:53.523 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":3,"fsid":"e2d4b2ee-1c65-11f1-bae0-b525704df8fa","modified":"2026-03-10T09:46:48.290879Z","created":"2026-03-10T09:46:01.447541Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3301","nonce":0},{"type":"v1","addr":"192.168.123.100:6790","nonce":0}]},"addr":"192.168.123.100:6790/0","public_addr":"192.168.123.100:6790/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]}
2026-03-10T09:46:53.524 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 3
2026-03-10T09:46:53.603 INFO:tasks.cephadm:Generating final ceph.conf file...
2026-03-10T09:46:53.603 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph config generate-minimal-conf
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: Reconfiguring mon.a (unknown last config time)...
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: Reconfiguring daemon mon.a on vm00
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: mon.a calling monitor election
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: mon.c calling monitor election
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: mon.a is new leader, mons a,c in quorum (ranks 0,1)
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: fsmap
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: mgrmap e13: y(active, since 22s)
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: overall HEALTH_OK
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: Reconfiguring mon.a (unknown last config time)...
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: Reconfiguring daemon mon.a on vm00
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: mon.a calling monitor election
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: mon.c calling monitor election
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: mon.a is new leader, mons a,c in quorum (ranks 0,1)
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:53.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: fsmap
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: mgrmap e13: y(active, since 22s)
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: overall HEALTH_OK
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:53 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:53] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:46:53.673 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:46:53] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:46:54.199 INFO:teuthology.orchestra.run.vm00.stdout:# minimal ceph.conf for e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:46:54.199 INFO:teuthology.orchestra.run.vm00.stdout:[global]
2026-03-10T09:46:54.199 INFO:teuthology.orchestra.run.vm00.stdout: fsid = e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:46:54.199 INFO:teuthology.orchestra.run.vm00.stdout: mon_host = [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]
2026-03-10T09:46:54.260 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
2026-03-10T09:46:54.260 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T09:46:54.260 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T09:46:54.302 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T09:46:54.302 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:46:54.413 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T09:46:54.413 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T09:46:54.440 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T09:46:54.440 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:46:54.506 INFO:tasks.cephadm:Adding mgr.y on vm00
2026-03-10T09:46:54.506 INFO:tasks.cephadm:Adding mgr.x on vm03
2026-03-10T09:46:54.506 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch apply mgr '2;vm00=y;vm03=x'
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: Reconfiguring mon.c (monmap changed)...
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: Reconfiguring daemon mon.c on vm00
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/3770283761' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: Reconfiguring mon.b (monmap changed)...
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: Reconfiguring daemon mon.b on vm03
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1473001590' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:54.677 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: Reconfiguring mon.c (monmap changed)...
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: Reconfiguring daemon mon.c on vm00
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/3770283761' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: Reconfiguring mon.b (monmap changed)...
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: Reconfiguring daemon mon.b on vm03
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1473001590' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:54.678 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.110 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mgr update...
2026-03-10T09:46:55.496 DEBUG:teuthology.orchestra.run.vm03:mgr.x> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service
2026-03-10T09:46:55.498 INFO:tasks.cephadm:Deploying OSDs...
2026-03-10T09:46:55.498 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T09:46:55.498 DEBUG:teuthology.orchestra.run.vm00:> dd if=/scratch_devs of=/dev/stdout
2026-03-10T09:46:55.526 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T09:46:55.527 DEBUG:teuthology.orchestra.run.vm00:> ls /dev/[sv]d?
2026-03-10T09:46:55.590 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vda
2026-03-10T09:46:55.590 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdb
2026-03-10T09:46:55.590 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdc
2026-03-10T09:46:55.590 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdd
2026-03-10T09:46:55.590 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vde
2026-03-10T09:46:55.590 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-10T09:46:55.590 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-10T09:46:55.590 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdb
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdb
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,10
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 09:46:49.519203402 +0000
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 09:46:49.312202989 +0000
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 09:46:49.312202989 +0000
2026-03-10T09:46:55.655 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 09:42:13.210000000 +0000
2026-03-10T09:46:55.655 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-10T09:46:55.735 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in
2026-03-10T09:46:55.735 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out
2026-03-10T09:46:55.735 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000178364 s, 2.9 MB/s
2026-03-10T09:46:55.737 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Deploying daemon mon.b on vm03
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mon.a calling monitor election
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mon.c calling monitor election
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mon.a is new leader, mons a,c in quorum (ranks 0,1)
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: monmap e2: 2 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: fsmap
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mgrmap e13: y(active, since 16s)
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: overall HEALTH_OK
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:55.761 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Reconfiguring mon.a (unknown last config time)...
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Reconfiguring daemon mon.a on vm00
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mon.a calling monitor election
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mon.c calling monitor election
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mon.a is new leader, mons a,c in quorum (ranks 0,1)
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: fsmap
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: mgrmap e13: y(active, since 22s)
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: overall HEALTH_OK
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:55.762 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Reconfiguring mon.c (monmap changed)...
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Reconfiguring daemon mon.c on vm00
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/3770283761' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Reconfiguring mon.b (monmap changed)...
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: Reconfiguring daemon mon.b on vm03
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1473001590' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:55.763 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:55.800 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdc
2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdc
2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 224 Links: 1 Device type: fc,20
2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:55.871
INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 09:46:49.583203530 +0000 2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 09:46:49.310202984 +0000 2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 09:46:49.310202984 +0000 2026-03-10T09:46:55.871 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 09:42:13.213000000 +0000 2026-03-10T09:46:55.871 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T09:46:55.939 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T09:46:55.939 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T09:46:55.939 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000162033 s, 3.2 MB/s 2026-03-10T09:46:55.940 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T09:46:56.002 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdd 2026-03-10T09:46:56.064 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdd 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 09:46:49.646203656 +0000 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 09:46:49.324203012 +0000 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 09:46:49.324203012 +0000 2026-03-10T09:46:56.065 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 09:42:13.236000000 +0000 2026-03-10T09:46:56.065 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T09:46:56.136 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T09:46:56.136 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T09:46:56.136 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000227066 s, 2.3 MB/s 2026-03-10T09:46:56.138 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T09:46:56.211 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vde 2026-03-10T09:46:56.272 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 systemd[1]: Starting Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
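The mon.b journal above shows back-to-back elections as cephadm reconfigures mon.a, mon.c, and mon.b after the monmap change, with mon.a re-elected leader each time. A minimal spot-check of the resulting quorum, assuming the same cephadm shell invocation this run uses elsewhere:

    # Hedged quorum check; image, keyring, and fsid copied from this run's other invocations.
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph quorum_status --format json-pretty
    # Expect quorum_names to settle on ["a","b","c"] with mon.a as leader.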
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vde
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 09:46:49.711203786 +0000
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 09:46:49.324203012 +0000
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 09:46:49.324203012 +0000
2026-03-10T09:46:56.273 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 09:42:13.273000000 +0000
2026-03-10T09:46:56.273 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-10T09:46:56.349 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in
2026-03-10T09:46:56.349 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out
2026-03-10T09:46:56.349 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000602408 s, 850 kB/s
2026-03-10T09:46:56.351 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-10T09:46:56.417 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T09:46:56.418 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout
2026-03-10T09:46:56.443 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T09:46:56.444 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d?
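The stat/dd/mount sequence above is teuthology vetting each scratch device on vm00; the failed read of /scratch_devs on vm03 (exit 1) then triggers the `ls /dev/[sv]d?` fallback whose output follows. The same per-device checks, condensed into a hypothetical loop (device list and root-disk assumption taken from this run):

    # Hypothetical condensed form of the per-device vetting logged in this run.
    for dev in /dev/vdb /dev/vdc /dev/vdd /dev/vde; do   # /dev/vda is the root disk and is skipped
        stat "$dev"                                      # node must exist as a block special file
        sudo dd if="$dev" of=/dev/null count=1           # first 512-byte sector must be readable
        ! mount | grep -v devtmpfs | grep -q "$dev"      # device must not be mounted anywhere
    done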
2026-03-10T09:46:56.508 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda
2026-03-10T09:46:56.508 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb
2026-03-10T09:46:56.508 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc
2026-03-10T09:46:56.508 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd
2026-03-10T09:46:56.508 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde
2026-03-10T09:46:56.508 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-10T09:46:56.508 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-10T09:46:56.509 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: mon.b calling monitor election
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='client.14202 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm00=y;vm03=x", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: Saving service mgr spec with placement vm00=y;vm03=x;count:2
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: mon.c calling monitor election
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: mon.a calling monitor election
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: mon.b calling monitor election
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: fsmap
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: mgrmap e13: y(active, since 24s)
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: overall HEALTH_OK
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: Deploying daemon mgr.x on vm03
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:46:56.546 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 podman[51690]: 2026-03-10 09:46:56.275100083 +0000 UTC m=+0.020145429 container create c7a9ee9d417710f884d3f9b288b6d9c692cb105b204b2daadf77ccf191e542dd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, vendor=Red Hat, Inc., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, name=centos-stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_BRANCH=HEAD, io.openshift.expose-services=, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, release=754, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, distribution-scope=public, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , RELEASE=HEAD)
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 podman[51690]: 2026-03-10 09:46:56.329889184 +0000 UTC m=+0.074934539 container init c7a9ee9d417710f884d3f9b288b6d9c692cb105b204b2daadf77ccf191e542dd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, RELEASE=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, architecture=x86_64, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., io.openshift.expose-services=, vcs-type=git, name=centos-stream, version=8, ceph=True, CEPH_POINT_RELEASE=-17.2.0)
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 podman[51690]: 2026-03-10 09:46:56.333620032 +0000 UTC m=+0.078665378 container start c7a9ee9d417710f884d3f9b288b6d9c692cb105b204b2daadf77ccf191e542dd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.tags=base centos centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.component=centos-stream-container, RELEASE=HEAD, io.openshift.expose-services=, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.19.8, maintainer=Guillaume Abrioux , io.k8s.display-name=CentOS Stream 8, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, release=754, architecture=x86_64, distribution-scope=public, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, name=centos-stream, ceph=True)
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 bash[51690]: c7a9ee9d417710f884d3f9b288b6d9c692cb105b204b2daadf77ccf191e542dd
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 podman[51690]: 2026-03-10 09:46:56.265902019 +0000 UTC m=+0.010947375 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 systemd[1]: Started Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:56.446+0000 7fce60c39000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:46:56.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:56.508+0000 7fce60c39000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:46:56.569 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb
2026-03-10T09:46:56.569 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:56.569 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10
2026-03-10T09:46:56.569 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:56.569 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T09:46:56.570 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 09:46:49.587240612 +0000
2026-03-10T09:46:56.570 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 09:46:49.314240728 +0000
2026-03-10T09:46:56.570 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 09:46:49.314240728 +0000
2026-03-10T09:46:56.570 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 09:41:42.214000000 +0000
2026-03-10T09:46:56.570 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-10T09:46:56.636 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in
2026-03-10T09:46:56.637 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out
2026-03-10T09:46:56.637 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000112221 s, 4.6 MB/s
2026-03-10T09:46:56.637 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-10T09:46:56.698 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: mon.b calling monitor election
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='client.14202 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm00=y;vm03=x", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: Saving service mgr spec with placement vm00=y;vm03=x;count:2
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: mon.c calling monitor election
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: mon.a calling monitor election
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: mon.b calling monitor election
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: fsmap
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: mgrmap e13: y(active, since 24s)
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: overall HEALTH_OK
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: Deploying daemon mgr.x on vm03
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:46:56.718 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: mon.b calling monitor election
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='client.14202 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm00=y;vm03=x", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: Saving service mgr spec with placement vm00=y;vm03=x;count:2
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: mon.c calling monitor election
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: mon.a calling monitor election
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: mon.b calling monitor election
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]}
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: fsmap
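The `orch apply` dispatched by client.14202 in the mon journals above pins two mgr daemons by host and daemon id (placement "2;vm00=y;vm03=x": count 2, daemon y on vm00, daemon x on vm03). A hedged CLI equivalent of that dispatch, plus a follow-up to confirm placement:

    # Roughly the command behind the {"prefix": "orch apply", "service_type": "mgr", ...} dispatch above;
    # the placement string is copied verbatim from the log.
    ceph orch apply mgr '2;vm00=y;vm03=x'
    ceph orch ps --daemon-type mgr   # expect mgr.y active on vm00 and mgr.x deploying on vm03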
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: osdmap e4: 0 total, 0 up, 0 in
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: mgrmap e13: y(active, since 24s)
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: overall HEALTH_OK
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: Deploying daemon mgr.x on vm03
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:46:56.719 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,20
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 09:46:49.669240577 +0000
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 09:46:49.329240722 +0000
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 09:46:49.329240722 +0000
2026-03-10T09:46:56.762 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 09:41:42.220000000 +0000
2026-03-10T09:46:56.762 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-10T09:46:56.829 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in
2026-03-10T09:46:56.829 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out
2026-03-10T09:46:56.829 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000165109 s, 3.1 MB/s
2026-03-10T09:46:56.830 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-10T09:46:56.897 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 225 Links: 1 Device type: fc,30
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 09:46:49.745240545 +0000
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 09:46:49.324240724 +0000
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 09:46:49.324240724 +0000
2026-03-10T09:46:56.960 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 09:41:42.231000000 +0000
2026-03-10T09:46:56.961 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-10T09:47:57.046 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in
2026-03-10T09:46:57.046 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out
2026-03-10T09:46:57.046 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000107592 s, 4.8 MB/s
2026-03-10T09:46:57.047 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-10T09:46:57.075 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 09:46:49.807240518 +0000
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 09:46:49.323240724 +0000
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 09:46:49.323240724 +0000
2026-03-10T09:46:57.154 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 09:41:42.300000000 +0000
2026-03-10T09:46:57.154 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-10T09:46:57.217 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:57.002+0000 7fce60c39000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:46:57.235 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in
2026-03-10T09:46:57.235 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out
2026-03-10T09:46:57.235 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000171632 s, 3.0 MB/s
2026-03-10T09:46:57.237 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-10T09:46:57.311 INFO:tasks.cephadm:Deploying osd.0 on vm00 with /dev/vde...
2026-03-10T09:46:57.311 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vde
2026-03-10T09:46:57.883 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:57.764+0000 7fce60c39000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: Reconfiguring mgr.y (unknown last config time)...
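For each device that passes vetting, the cephadm task runs two commands: the `lvm zap` just logged, which clears any leftover LVM and partition state, and the `orch daemon add osd` that appears a few lines below. Both copied from this run (image, keyring, fsid, and device as logged):

    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vde
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm00:/dev/vde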
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: Reconfiguring daemon mgr.y on vm00
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:57.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: Reconfiguring mgr.y (unknown last config time)...
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: Reconfiguring daemon mgr.y on vm00
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:57.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:57 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:58.014 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T09:46:58.042 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm00:/dev/vde
2026-03-10T09:46:58.235 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:57 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:57] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:46:58.235 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:46:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:46:58] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: Reconfiguring mgr.y (unknown last config time)...
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: Reconfiguring daemon mgr.y on vm00
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:58.004+0000 7fce60c39000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:46:58.267 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:58.082+0000 7fce60c39000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:46:58.544 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:58.270+0000 7fce60c39000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:46:58.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
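The `orch daemon add osd` dispatched above makes the mgr allocate an OSD id via an `osd new <uuid>` call issued as client.bootstrap-osd (visible just below), which bumps the osdmap to "e5: 1 total, 0 up, 1 in"; the OSD only counts as up once ceph-volume finishes and the daemon boots. A hedged way to watch that progress, reusing this run's cephadm invocation style:

    # Hedged progress check; image, keyring, and fsid copied from this run.
    CEPHADM='sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0'
    CONN='-c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa'
    $CEPHADM shell $CONN -- ceph osd tree   # osd.0 appears first as down/in
    $CEPHADM shell $CONN -- ceph -s         # osdmap should advance past "1 total, 0 up, 1 in"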
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:58.934+0000 7fce60c39000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:46:59.213 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.146+0000 7fce60c39000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:46:59.544 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.215+0000 7fce60c39000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:46:59.544 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.278+0000 7fce60c39000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:46:59.544 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.346+0000 7fce60c39000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:46:59.544 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.407+0000 7fce60c39000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[52384]: from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/252501430' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74379a85-5860-4155-922a-ce2adedc2262"}]: dispatch
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/252501430' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "74379a85-5860-4155-922a-ce2adedc2262"}]': finished
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[52384]: osdmap e5: 1 total, 0 up, 1 in
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[56720]: from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/252501430' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74379a85-5860-4155-922a-ce2adedc2262"}]: dispatch
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/252501430' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "74379a85-5860-4155-922a-ce2adedc2262"}]': finished
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[56720]: osdmap e5: 1 total, 0 up, 1 in
2026-03-10T09:47:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:46:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:59 vm03 ceph-mon[50536]: from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:59 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/252501430' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74379a85-5860-4155-922a-ce2adedc2262"}]: dispatch
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:59 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/252501430' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "74379a85-5860-4155-922a-ce2adedc2262"}]': finished
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:59 vm03 ceph-mon[50536]: osdmap e5: 1 total, 0 up, 1 in
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:46:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.738+0000 7fce60c39000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:47:00.044 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:46:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:46:59.817+0000 7fce60c39000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:47:00.701 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.418+0000 7fce60c39000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:47:00.701 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.483+0000 7fce60c39000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:47:00.701 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.554+0000 7fce60c39000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:47:00.976 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.703+0000 7fce60c39000 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:47:00.976 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.771+0000 7fce60c39000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:47:00.976 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.881+0000 7fce60c39000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:47:01.009 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:00 vm00 ceph-mon[52384]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:47:01.009 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1511036181' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:01.009 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:00 vm00 ceph-mon[56720]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:47:01.009 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1511036181' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:01.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:00 vm03 ceph-mon[50536]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T09:47:01.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:00 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1511036181' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:01.294 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:00.983+0000 7fce60c39000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:47:01.794 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:01.327+0000 7fce60c39000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:47:01.794 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:47:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:47:01.399+0000 7fce60c39000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[52384]: Standby manager daemon x started
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[56720]: Standby manager daemon x started
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:47:02.057 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:01 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:47:02.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:01 vm03 ceph-mon[50536]: Standby manager daemon x started
2026-03-10T09:47:02.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:01 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:47:02.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:01 vm03 ceph-mon[50536]: from='mgr.?
192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:47:02.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:01 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:47:02.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:01 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1595232892' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:47:03.074 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:02 vm00 ceph-mon[52384]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:03.074 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:02 vm00 ceph-mon[52384]: mgrmap e14: y(active, since 31s), standbys: x 2026-03-10T09:47:03.074 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:47:03.074 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:02 vm00 ceph-mon[56720]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:03.074 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:02 vm00 ceph-mon[56720]: mgrmap e14: y(active, since 31s), standbys: x 2026-03-10T09:47:03.074 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:47:03.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:02 vm03 ceph-mon[50536]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:03.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:02 vm03 ceph-mon[50536]: mgrmap e14: y(active, since 31s), standbys: x 2026-03-10T09:47:03.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:47:04.089 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:03 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:47:04.089 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:03 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:04.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:03 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:47:04.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:03 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:04.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:04 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:47:04.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:04 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:05.130 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: Deploying daemon osd.0 on vm00 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:04 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: Deploying daemon osd.0 on vm00 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:05.130 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:05 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: Deploying daemon osd.0 on vm00 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: from='mgr.14152 
192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:05.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:05 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:05.714 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 0 on host 'vm00' 2026-03-10T09:47:05.764 DEBUG:teuthology.orchestra.run.vm00:osd.0> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service 2026-03-10T09:47:05.765 INFO:tasks.cephadm:Deploying osd.1 on vm00 with /dev/vdd... 2026-03-10T09:47:05.765 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdd 2026-03-10T09:47:06.479 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:47:06.501 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm00:/dev/vdd 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:06.894 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:06.895 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:06.895 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:06.895 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:06.895 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:06.895 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:07.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:07.148 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:47:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[60002]: 2026-03-10T09:47:07.046+0000 7fb166acb3c0 -1 osd.0 0 log_to_monitors true 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[52384]: from='osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[52384]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[56720]: from='osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", 
"class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[56720]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:07 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:07.903 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:47:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[60002]: 2026-03-10T09:47:07.883+0000 7fb15d4ce700 -1 osd.0 0 waiting for initial osdmap 2026-03-10T09:47:07.903 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:47:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[60002]: 2026-03-10T09:47:07.893+0000 7fb158e67700 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:47:08.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:07 vm03 ceph-mon[50536]: from='osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:47:08.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:07 vm03 ceph-mon[50536]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:47:08.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:07 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T09:47:08.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:07 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T09:47:08.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:07 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='client.24118 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='mgr.14152 
192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2342090028' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "718beebe-c05a-490a-835a-00fdd797508b"}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2342090028' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "718beebe-c05a-490a-835a-00fdd797508b"}]': finished 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/4284454108' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='client.24118 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2342090028' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "718beebe-c05a-490a-835a-00fdd797508b"}]: dispatch 2026-03-10T09:47:08.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T09:47:08.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2342090028' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "718beebe-c05a-490a-835a-00fdd797508b"}]': finished 2026-03-10T09:47:08.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T09:47:08.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:08.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:08.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:08.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:08 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/4284454108' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='client.24118 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2342090028' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "718beebe-c05a-490a-835a-00fdd797508b"}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2342090028' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "718beebe-c05a-490a-835a-00fdd797508b"}]': finished 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:09.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:08 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/4284454108' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:09.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:47:09] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:47:10.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:09 vm03 ceph-mon[50536]: osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759] boot 2026-03-10T09:47:10.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:09 vm03 ceph-mon[50536]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T09:47:10.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:09 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:10.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:09 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:10.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:09 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:10.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:09 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[52384]: osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759] boot 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[52384]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[56720]: osd.0 [v2:192.168.123.100:6802/398256759,v1:192.168.123.100:6803/398256759] boot 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[56720]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:09 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 
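[With osd.0 up, the run repeats the same per-device sequence visible in the DEBUG command lines above: zap the device with ceph-volume, then hand it to the orchestrator. A minimal sketch of that sequence for one device, reusing the image, fsid, host, and device path from this run (substitute your own values):

    # Wipe prior LVM/filesystem state from the device (values copied from this run).
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 \
        ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdd
    # Ask the orchestrator to create an OSD on that device.
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm00:/dev/vdd
]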
2026-03-10T09:47:11.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:10 vm03 ceph-mon[50536]: purged_snaps scrub starts
2026-03-10T09:47:11.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:10 vm03 ceph-mon[50536]: purged_snaps scrub ok
2026-03-10T09:47:11.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:10 vm03 ceph-mon[50536]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:10 vm00 ceph-mon[52384]: purged_snaps scrub starts
2026-03-10T09:47:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:10 vm00 ceph-mon[52384]: purged_snaps scrub ok
2026-03-10T09:47:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:10 vm00 ceph-mon[52384]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:10 vm00 ceph-mon[56720]: purged_snaps scrub starts
2026-03-10T09:47:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:10 vm00 ceph-mon[56720]: purged_snaps scrub ok
2026-03-10T09:47:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:10 vm00 ceph-mon[56720]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:12.116 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:11 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:47:12.116 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:11 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:12.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:11 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:47:12.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:11 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:12.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:11 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:47:12.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:11 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:12.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:12 vm00 ceph-mon[52384]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:12.987 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:12 vm00 ceph-mon[52384]: Deploying daemon osd.1 on vm00
2026-03-10T09:47:12.987 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:12 vm00 ceph-mon[56720]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:12.987 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:12 vm00 ceph-mon[56720]: Deploying daemon osd.1 on vm00
2026-03-10T09:47:13.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:12 vm03 ceph-mon[50536]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:13.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:12 vm03 ceph-mon[50536]: Deploying daemon osd.1 on vm00
2026-03-10T09:47:14.226 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 1 on host 'vm00'
2026-03-10T09:47:14.280 DEBUG:teuthology.orchestra.run.vm00:osd.1> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service
2026-03-10T09:47:14.282 INFO:tasks.cephadm:Deploying osd.2 on vm00 with /dev/vdc...
2026-03-10T09:47:14.282 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdc
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:14.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:14 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
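[Each "Created osd(s) N on host ..." line is immediately followed by the harness attaching a journal follower for the new daemon (the "osd.1>" DEBUG line above). Assuming the same fsid-qualified systemd unit naming shown there, the equivalent manual check for the new OSD would be:

    # Follow the new OSD's unit journal from the moment of creation onward.
    sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service
]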
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:14.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:14 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:14.920 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T09:47:14.933 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm00:/dev/vdc
2026-03-10T09:47:15.110 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:47:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[62684]: 2026-03-10T09:47:15.096+0000 7f018cd2e3c0 -1 osd.1 0 log_to_monitors true
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[52384]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[52384]: from='osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[56720]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[56720]: from='osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:15.666 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:15 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:15.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:15 vm03 ceph-mon[50536]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:47:15.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:15 vm03 ceph-mon[50536]: from='osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:47:15.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:15 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:15.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:15 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:15.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:15 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:16.498 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:47:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[62684]: 2026-03-10T09:47:16.125+0000 7f0183731700 -1 osd.1 0 waiting for initial osdmap
2026-03-10T09:47:16.498 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:47:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[62684]: 2026-03-10T09:47:16.131+0000 7f01800cc700 -1 osd.1 10 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: osdmap e9: 2 total, 1 up, 2 in
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1278962103' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b8c6c746-4058-4653-b11b-b1cb8e4cd332"}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1278962103' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b8c6c746-4058-4653-b11b-b1cb8e4cd332"}]': finished
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: osdmap e10: 3 total, 1 up, 3 in
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:16.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:16 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: osdmap e9: 2 total, 1 up, 2 in
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1278962103' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b8c6c746-4058-4653-b11b-b1cb8e4cd332"}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1278962103' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b8c6c746-4058-4653-b11b-b1cb8e4cd332"}]': finished
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: osdmap e10: 3 total, 1 up, 3 in
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: osdmap e9: 2 total, 1 up, 2 in
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1278962103' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b8c6c746-4058-4653-b11b-b1cb8e4cd332"}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1278962103' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b8c6c746-4058-4653-b11b-b1cb8e4cd332"}]': finished
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: osdmap e10: 3 total, 1 up, 3 in
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:16.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:16 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:17.501 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:17 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/298399670' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:17.501 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:17 vm03 ceph-mon[50536]: osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823] boot
2026-03-10T09:47:17.501 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:17 vm03 ceph-mon[50536]: osdmap e11: 3 total, 2 up, 3 in
2026-03-10T09:47:17.501 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:17 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:17.501 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:17 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/298399670' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[52384]: osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823] boot
2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[52384]: osdmap e11: 3 total, 2 up, 3 in
2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[56720]: from='client.?
192.168.123.100:0/298399670' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[56720]: osd.1 [v2:192.168.123.100:6810/1400892823,v1:192.168.123.100:6811/1400892823] boot 2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[56720]: osdmap e11: 3 total, 2 up, 3 in 2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:47:17.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:17 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:47:18.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:47:17] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: purged_snaps scrub starts 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: purged_snaps scrub ok 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: pgmap v24: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: purged_snaps scrub starts 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: purged_snaps scrub ok 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: pgmap v24: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T09:47:19.038 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:18 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:47:19.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: purged_snaps scrub starts 2026-03-10T09:47:19.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: purged_snaps scrub ok 2026-03-10T09:47:19.294 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: pgmap v24: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T09:47:19.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:19.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:19.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T09:47:19.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:18 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:47:20.282 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:19 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T09:47:20.282 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:19 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:20.282 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:19 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T09:47:20.282 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:19 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:20.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:19 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T09:47:20.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:19 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:21.103 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:20 vm00 ceph-mon[52384]: pgmap v26: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T09:47:21.103 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:20 vm00 ceph-mon[52384]: Deploying daemon osd.2 on vm00 2026-03-10T09:47:21.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:20 vm00 ceph-mon[56720]: pgmap v26: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T09:47:21.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:20 vm00 ceph-mon[56720]: Deploying daemon osd.2 on vm00 2026-03-10T09:47:21.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:20 vm03 ceph-mon[50536]: pgmap v26: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T09:47:21.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:20 vm03 ceph-mon[50536]: Deploying daemon osd.2 on vm00 2026-03-10T09:47:21.801 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 2 on host 'vm00' 2026-03-10T09:47:21.882 DEBUG:teuthology.orchestra.run.vm00:osd.2> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service 2026-03-10T09:47:21.883 INFO:tasks.cephadm:Deploying osd.3 on vm00 with /dev/vdb... 
2026-03-10T09:47:21.883 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdb
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:22.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:22.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:22 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:22.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:22 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:22.521 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T09:47:22.539 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm00:/dev/vdb
2026-03-10T09:47:22.734 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:47:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[65085]: 2026-03-10T09:47:22.675+0000 7fe669ffb3c0 -1 osd.2 0 log_to_monitors true
2026-03-10T09:47:23.300 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[52384]: from='osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[52384]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[52384]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[56720]: from='osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[56720]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[56720]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:23.301 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:23 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:23.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:23 vm03 ceph-mon[50536]: from='osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:47:23.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:23 vm03 ceph-mon[50536]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:47:23.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:23 vm03 ceph-mon[50536]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:23.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:23 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:23.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:23 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:23.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:23 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:24.110 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:47:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[65085]: 2026-03-10T09:47:23.734+0000 7fe6609fe700 -1 osd.2 0 waiting for initial osdmap
2026-03-10T09:47:24.111 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:47:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[65085]: 2026-03-10T09:47:23.744+0000 7fe65ab94700 -1 osd.2 14 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: osdmap e13: 3 total, 2 up, 3 in
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2486802438' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c20101af-d2e8-44bf-8684-17f240235e25"}]: dispatch
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2486802438' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c20101af-d2e8-44bf-8684-17f240235e25"}]': finished
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: osdmap e14: 4 total, 2 up, 4 in
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: osdmap e13: 3 total, 2 up, 3 in
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2486802438' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c20101af-d2e8-44bf-8684-17f240235e25"}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2486802438' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c20101af-d2e8-44bf-8684-17f240235e25"}]': finished
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: osdmap e14: 4 total, 2 up, 4 in
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:24.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:24 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: osdmap e13: 3 total, 2 up, 3 in
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2486802438' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c20101af-d2e8-44bf-8684-17f240235e25"}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2486802438' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c20101af-d2e8-44bf-8684-17f240235e25"}]': finished
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: osdmap e14: 4 total, 2 up, 4 in
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:24.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:24 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1772063356' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[52384]: osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956] boot
2026-03-10T09:47:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[52384]: osdmap e15: 4 total, 3 up, 4 in
2026-03-10T09:47:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1772063356' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[56720]: osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956] boot
2026-03-10T09:47:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[56720]: osdmap e15: 4 total, 3 up, 4 in
2026-03-10T09:47:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:25 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:25.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:25 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1772063356' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:25.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:25 vm03 ceph-mon[50536]: osd.2 [v2:192.168.123.100:6818/2605451956,v1:192.168.123.100:6819/2605451956] boot
2026-03-10T09:47:25.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:25 vm03 ceph-mon[50536]: osdmap e15: 4 total, 3 up, 4 in
2026-03-10T09:47:25.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:25 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:47:25.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:25 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:26.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[56720]: purged_snaps scrub starts
2026-03-10T09:47:26.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[56720]: purged_snaps scrub ok
2026-03-10T09:47:26.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[56720]: pgmap v32: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:26.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch
2026-03-10T09:47:26.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[52384]: purged_snaps scrub starts
2026-03-10T09:47:26.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[52384]: purged_snaps scrub ok
2026-03-10T09:47:26.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[52384]: pgmap v32: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:26.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:26 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch
2026-03-10T09:47:26.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:26 vm03 ceph-mon[50536]: purged_snaps scrub starts
2026-03-10T09:47:26.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:26 vm03 ceph-mon[50536]: purged_snaps scrub ok
2026-03-10T09:47:26.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:26 vm03 ceph-mon[50536]: pgmap v32: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:26.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:26 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch
2026-03-10T09:47:27.340 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished
2026-03-10T09:47:27.340 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[52384]: osdmap e16: 4 total, 3 up, 4 in
2026-03-10T09:47:27.340 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:27.340 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-10T09:47:27.340 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T09:47:27.340 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:27.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished
2026-03-10T09:47:27.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[56720]: osdmap e16: 4 total, 3 up, 4 in
2026-03-10T09:47:27.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:27.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-10T09:47:27.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T09:47:27.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:27 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:27.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:27 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished
2026-03-10T09:47:27.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:27 vm03 ceph-mon[50536]: osdmap e16: 4 total, 3 up, 4 in
2026-03-10T09:47:27.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:27 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:27.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:27 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-10T09:47:27.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:27 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T09:47:27.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:27 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:27.859 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:47:27 vm00 sudo[67260]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde
2026-03-10T09:47:27.859 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:47:27 vm00 sudo[67260]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T09:47:27.859 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:47:27 vm00 sudo[67260]: pam_unix(sudo:session): session closed for user root
2026-03-10T09:47:28.217 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:47:27 vm00 sudo[67319]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd
2026-03-10T09:47:28.217 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:47:27 vm00 sudo[67319]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T09:47:28.217 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:47:27 vm00 sudo[67319]: pam_unix(sudo:session): session closed for user root
2026-03-10T09:47:28.217 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67363]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc
2026-03-10T09:47:28.217 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67363]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T09:47:28.217 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67363]: pam_unix(sudo:session): session closed for user root
2026-03-10T09:47:28.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67420]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda
2026-03-10T09:47:28.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67420]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T09:47:28.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67420]: pam_unix(sudo:session): session closed for user root
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: Deploying daemon osd.3 on vm00
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: osdmap e17: 4 total, 3 up, 4 in
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67487]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67487]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 sudo[67487]: pam_unix(sudo:session): session closed for user root
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: Deploying daemon osd.3 on vm00
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: osdmap e17: 4 total, 3 up, 4 in
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:28 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:28.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:47:28] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: Deploying daemon osd.3 on vm00
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: osdmap e17: 4 total, 3 up, 4 in
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 sudo[52574]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 sudo[52574]: pam_unix(sudo:session): session opened for user root by (uid=0)
2026-03-10T09:47:29.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:28 vm03 sudo[52574]: pam_unix(sudo:session): session closed for user root
2026-03-10T09:47:29.764 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:47:29] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:47:29.765 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:29.765 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T09:47:29.765 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:29.765 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:47:29.765 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:47:29.765 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: osdmap e18: 4 total, 3 up, 4 in
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: Detected new or changed devices on vm00
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: pgmap v37: 1 pgs: 1 active+clean; 449 KiB data, 15 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: pgmap v38: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]:
dispatch 2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:47:29.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: Detected new or changed devices on vm00 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: pgmap v37: 1 pgs: 1 active+clean; 449 KiB data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: pgmap v38: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 
ceph-mon[52384]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:29.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:29 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.863 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 3 on host 'vm00' 2026-03-10T09:47:29.917 DEBUG:teuthology.orchestra.run.vm00:osd.3> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service 2026-03-10T09:47:29.918 INFO:tasks.cephadm:Deploying osd.4 on vm03 with /dev/vde... 2026-03-10T09:47:29.919 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vde 2026-03-10T09:47:29.947 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T09:47:29.947 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T09:47:29.948 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: Detected new or changed devices on vm00 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: pgmap v37: 1 pgs: 1 active+clean; 449 KiB data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: pgmap v38: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:29.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:29 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:30.493 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T09:47:30.506 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm03:/dev/vde 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:31.012 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:30 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:31.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:31.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-10T09:47:31.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:47:30 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:31.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:30 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: osdmap e19: 4 total, 3 up, 4 in 2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 
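The two teuthology.orchestra commands above are the whole per-device recipe this task uses to add an OSD: wipe the device with ceph-volume, then ask the orchestrator to create an OSD on it. A minimal sketch of the same two steps run by hand against this cluster follows; host, device, image, and fsid are taken from this run, while invoking a cephadm found on PATH (rather than the harness's own copy at /home/ubuntu/cephtest/cephadm) is an assumption:

    # wipe any previous LVM/partition state on the target device
    sudo cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vde
    # then create an OSD on the clean device through the orchestrator
    sudo cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm03:/dev/vde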
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/1807285534' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]: dispatch
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]': finished
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: osdmap e20: 5 total, 3 up, 5 in
2026-03-10T09:47:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:47:31 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[67650]: 2026-03-10T09:47:31.722+0000 7f71fbd50700 -1 osd.3 0 waiting for initial osdmap
2026-03-10T09:47:32.120 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:47:31 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[67650]: 2026-03-10T09:47:31.730+0000 7f71f5ee6700 -1 osd.3 20 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: osdmap e19: 4 total, 3 up, 4 in
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/1807285534' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]': finished
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: osdmap e20: 5 total, 3 up, 5 in
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:32.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:31 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: osdmap e19: 4 total, 3 up, 4 in
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/1807285534' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fa9a846e-614f-4e56-875b-56d674cbe31c"}]': finished
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: osdmap e20: 5 total, 3 up, 5 in
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:32.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:31 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[52384]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/958933556' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[52384]: osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294] boot
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[52384]: osdmap e21: 5 total, 4 up, 5 in
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[56720]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/958933556' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[56720]: osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294] boot
2026-03-10T09:47:33.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[56720]: osdmap e21: 5 total, 4 up, 5 in
2026-03-10T09:47:33.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:33.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:32 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:33.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:32 vm03 ceph-mon[50536]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail
2026-03-10T09:47:33.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:32 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/958933556' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:33.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:32 vm03 ceph-mon[50536]: osd.3 [v2:192.168.123.100:6826/3956173294,v1:192.168.123.100:6827/3956173294] boot
2026-03-10T09:47:33.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:32 vm03 ceph-mon[50536]: osdmap e21: 5 total, 4 up, 5 in
2026-03-10T09:47:33.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:32 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:47:33.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:32 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:34.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:33 vm00 ceph-mon[52384]: purged_snaps scrub starts
2026-03-10T09:47:34.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:33 vm00 ceph-mon[52384]: purged_snaps scrub ok
2026-03-10T09:47:34.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:33 vm00 ceph-mon[56720]: purged_snaps scrub starts
2026-03-10T09:47:34.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:33 vm00 ceph-mon[56720]: purged_snaps scrub ok
2026-03-10T09:47:34.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:33 vm03 ceph-mon[50536]: purged_snaps scrub starts
2026-03-10T09:47:34.294 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:33 vm03 ceph-mon[50536]: purged_snaps scrub ok
2026-03-10T09:47:34.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:34 vm03 ceph-mon[50536]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 2.0 KiB/s rd, 58 KiB/s wr, 5 op/s
2026-03-10T09:47:34.924 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:34 vm03 ceph-mon[50536]: osdmap e22: 5 total, 4 up, 5 in
2026-03-10T09:47:34.924 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:34 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:34 vm00 ceph-mon[52384]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 2.0 KiB/s rd, 58 KiB/s wr, 5 op/s
2026-03-10T09:47:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:34 vm00 ceph-mon[52384]: osdmap e22: 5 total, 4 up, 5 in
2026-03-10T09:47:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:34 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:34 vm00 ceph-mon[56720]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 2.0 KiB/s rd, 58 KiB/s wr, 5 op/s
2026-03-10T09:47:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:34 vm00 ceph-mon[56720]: osdmap e22: 5 total, 4 up, 5 in
2026-03-10T09:47:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:34 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:36.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:35 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
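The entries above trace osd.3's birth end to end: "osd new" reserves the id, the daemon registers its device class and CRUSH position, and "boot" lands it in osdmap e21 (5 total, 4 up, 5 in), with the pgmap's raw capacity growing from 60 GiB to 80 GiB as the new device's space joins. An illustrative check, not part of the run, using the same cephadm shell invocation the harness uses (standard ceph CLI; the commands and their placement here are this editor's sketch):

    # confirm the new OSD is up and weighted into the CRUSH tree
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd tree
    # and check overall cluster health and capacity
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph -s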
2026-03-10T09:47:36.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:35 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:36.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:35 vm03 ceph-mon[50536]: Deploying daemon osd.4 on vm03
2026-03-10T09:47:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:35 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T09:47:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:35 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:35 vm00 ceph-mon[52384]: Deploying daemon osd.4 on vm03
2026-03-10T09:47:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:35 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T09:47:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:35 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:35 vm00 ceph-mon[56720]: Deploying daemon osd.4 on vm03
2026-03-10T09:47:37.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:36 vm03 ceph-mon[50536]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.9 KiB/s rd, 56 KiB/s wr, 5 op/s
2026-03-10T09:47:37.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:36 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:37.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:36 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:37.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:36 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:37.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:36 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:37.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:36 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[52384]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.9 KiB/s rd, 56 KiB/s wr, 5 op/s
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[56720]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.9 KiB/s rd, 56 KiB/s wr, 5 op/s
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:36 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:37.174 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 4 on host 'vm03'
2026-03-10T09:47:37.217 DEBUG:teuthology.orchestra.run.vm03:osd.4> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.4.service
2026-03-10T09:47:37.218 INFO:tasks.cephadm:Deploying osd.5 on vm03 with /dev/vdd...
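After each "Created osd(s) N on host ..." line, the harness attaches a journal follower for the new daemon, as it did for osd.3 and now osd.4. The systemd unit name is derived from the cluster fsid and the daemon id; a sketch of the same command for inspecting any cephadm-managed daemon on its host (unit-name pattern ceph-<fsid>@<daemon>.service, values here from this run):

    # follow the new daemon's journal from now on (no backlog)
    sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.4.service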
2026-03-10T09:47:37.219 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdd
2026-03-10T09:47:37.876 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T09:47:37.896 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm03:/dev/vdd
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.5 KiB/s rd, 44 KiB/s wr, 3 op/s
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T09:47:38.334 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:38 vm03 ceph-mon[50536]: from='osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T09:47:38.334 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:47:38 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[54312]: 2026-03-10T09:47:38.121+0000 7f71ad1173c0 -1 osd.4 0 log_to_monitors true
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.5 KiB/s rd, 44 KiB/s wr, 3 op/s
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[52384]: from='osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.5 KiB/s rd, 44 KiB/s wr, 3 op/s
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T09:47:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:38 vm00 ceph-mon[56720]: from='osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-10T09:47:38.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:47:38] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:47:39.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:47:38] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:47:39.544 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:47:39 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[54312]: 2026-03-10T09:47:39.251+0000 7f71a531d700 -1 osd.4 0 waiting for initial osdmap
2026-03-10T09:47:39.544 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:47:39 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[54312]: 2026-03-10T09:47:39.258+0000 7f719dcb0700 -1 osd.4 24 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: osdmap e23: 5 total, 4 up, 5 in
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='client.24205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:39.545 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:39 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: osdmap e23: 5 total, 4 up, 5 in
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='client.24205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: osdmap e23: 5 total, 4 up, 5 in
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='client.24205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:39 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]:
osdmap e24: 5 total, 4 up, 5 in 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]: dispatch 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/2903829332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]: dispatch 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511] boot 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]': finished 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:40.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:40 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/2468937206' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: osdmap e24: 5 total, 4 up, 5 in 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/2903829332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511] boot 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]': finished 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/2468937206' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: osdmap e24: 5 total, 4 up, 5 in 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/2903829332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: osd.4 [v2:192.168.123.103:6800/83821511,v1:192.168.123.103:6801/83821511] boot 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f2832919-d86c-4c2f-b301-84a2776e8ec6"}]': finished 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:40 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/2468937206' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:41.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:41 vm03 ceph-mon[50536]: purged_snaps scrub starts 2026-03-10T09:47:41.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:41 vm03 ceph-mon[50536]: purged_snaps scrub ok 2026-03-10T09:47:41.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:41 vm03 ceph-mon[50536]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T09:47:41.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:41 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[52384]: purged_snaps scrub starts 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[52384]: purged_snaps scrub ok 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[52384]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[56720]: purged_snaps scrub starts 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[56720]: purged_snaps scrub ok 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[56720]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T09:47:41.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:41 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:42.614 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:42 vm03 ceph-mon[50536]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T09:47:42.614 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:42 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:42.614 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:42 vm03 ceph-mon[50536]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T09:47:42.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:42 vm00 ceph-mon[52384]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T09:47:42.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:42 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 
5}]: dispatch 2026-03-10T09:47:42.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:42 vm00 ceph-mon[52384]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T09:47:42.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:42 vm00 ceph-mon[56720]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T09:47:42.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:42 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:47:42.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:42 vm00 ceph-mon[56720]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T09:47:43.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:43 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:47:43.544 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:43 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:43.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:43 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:47:43.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:43 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:43.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:43 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:47:43.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:43 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:44.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:44 vm03 ceph-mon[50536]: Deploying daemon osd.5 on vm03 2026-03-10T09:47:44.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:44 vm03 ceph-mon[50536]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 91 KiB/s, 0 objects/s recovering 2026-03-10T09:47:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:44 vm00 ceph-mon[52384]: Deploying daemon osd.5 on vm03 2026-03-10T09:47:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:44 vm00 ceph-mon[52384]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 91 KiB/s, 0 objects/s recovering 2026-03-10T09:47:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:44 vm00 ceph-mon[56720]: Deploying daemon osd.5 on vm03 2026-03-10T09:47:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:44 vm00 ceph-mon[56720]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 91 KiB/s, 0 objects/s recovering 2026-03-10T09:47:45.274 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 5 on host 'vm03' 2026-03-10T09:47:45.335 DEBUG:teuthology.orchestra.run.vm03:osd.5> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service 2026-03-10T09:47:45.339 INFO:tasks.cephadm:Deploying osd.6 on vm03 with /dev/vdc... 
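The two DEBUG commands that follow are the per-device pattern the harness repeats for every OSD it creates on vm03: zap the device with ceph-volume from the bootstrap container, then hand it to the orchestrator with `ceph orch daemon add osd`. A minimal consolidated sketch of that loop, assuming the fsid and image from this run; the loop form and variable names are illustrative, not harness code:

    # Illustrative re-statement of the deploy loop visible in this log (not a teuthology excerpt).
    FSID=e2d4b2ee-1c65-11f1-bae0-b525704df8fa   # fsid of this test cluster
    IMAGE=quay.io/ceph/ceph:v17.2.0             # bootstrap image used by the run
    for dev in /dev/vdd /dev/vdc /dev/vdb; do   # devices consumed on vm03 in this span
        # wipe any previous LVM/partition state on the device
        sudo ./cephadm --image "$IMAGE" ceph-volume -c /etc/ceph/ceph.conf \
            -k /etc/ceph/ceph.client.admin.keyring --fsid "$FSID" -- lvm zap "$dev"
        # ask the orchestrator to stand up an OSD on the freshly zapped device
        sudo ./cephadm --image "$IMAGE" shell -c /etc/ceph/ceph.conf \
            -k /etc/ceph/ceph.client.admin.keyring --fsid "$FSID" -- \
            ceph orch daemon add osd "vm03:$dev"
    done

Each pass then shows up in the mon audit stream as the 'orch daemon add osd' dispatch, an 'osd new' from client.bootstrap-osd, the crush set-device-class/create-or-move pair, and finally the 'osd.N ... boot' line.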
2026-03-10T09:47:45.339 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdc
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:45.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:45.795 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:45 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:47:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:45 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:45.979 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-10T09:47:45.992 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm03:/dev/vdc
2026-03-10T09:47:46.172 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:47:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[56998]: 2026-03-10T09:47:46.163+0000 7feaa3a5c3c0 -1 osd.5 0 log_to_monitors true
2026-03-10T09:47:46.742 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:46 vm03 ceph-mon[50536]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 65 KiB/s, 0 objects/s recovering
2026-03-10T09:47:46.742 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:46 vm03 ceph-mon[50536]: from='osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:47:46.742 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:46 vm03 ceph-mon[50536]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:47:46.742 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:46 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:46.742 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:46 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:46.742 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:46 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[52384]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 65 KiB/s, 0 objects/s recovering
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[52384]: from='osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[52384]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[56720]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 65 KiB/s, 0 objects/s recovering
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[56720]: from='osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[56720]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T09:47:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:46 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:47.544 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:47:47 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[56998]: 2026-03-10T09:47:47.182+0000 7fea9a45f700 -1 osd.5 0 waiting for initial osdmap
2026-03-10T09:47:47.544 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:47:47 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[56998]: 2026-03-10T09:47:47.187+0000 7fea945f5700 -1 osd.5 29 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: osdmap e28: 6 total, 5 up, 6 in
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/1519404400' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]': finished
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: osdmap e29: 7 total, 5 up, 7 in
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: osdmap e28: 6 total, 5 up, 6 in
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/1519404400' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]': finished
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: osdmap e29: 7 total, 5 up, 7 in
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:47 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: osdmap e28: 6 total, 5 up, 6 in
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/1519404400' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "310b60d4-374c-41a3-be5a-54d72a8a8262"}]': finished
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: osdmap e29: 7 total, 5 up, 7 in
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:48.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:47 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[52384]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 64 KiB/s, 0 objects/s recovering
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/1161004762' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[52384]: osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609] boot
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[52384]: osdmap e30: 7 total, 6 up, 7 in
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[56720]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 64 KiB/s, 0 objects/s recovering
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/1161004762' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[56720]: osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609] boot
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[56720]: osdmap e30: 7 total, 6 up, 7 in
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:48 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:49.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:48 vm03 ceph-mon[50536]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 64 KiB/s, 0 objects/s recovering
2026-03-10T09:47:49.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:48 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/1161004762' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T09:47:49.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:48 vm03 ceph-mon[50536]: osd.5 [v2:192.168.123.103:6808/3474606609,v1:192.168.123.103:6809/3474606609] boot
2026-03-10T09:47:49.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:48 vm03 ceph-mon[50536]: osdmap e30: 7 total, 6 up, 7 in
2026-03-10T09:47:49.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:48 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:47:49.044 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:48 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:49.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[52384]: purged_snaps scrub starts
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[52384]: purged_snaps scrub ok
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[52384]: osdmap e31: 7 total, 6 up, 7 in
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:49 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:47:49] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:49 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:47:49] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[56720]: purged_snaps scrub starts
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[56720]: purged_snaps scrub ok
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[56720]: osdmap e31: 7 total, 6 up, 7 in
2026-03-10T09:47:49.800 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:49 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:49.834 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:49 vm03 ceph-mon[50536]: purged_snaps scrub starts
2026-03-10T09:47:49.834 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:49 vm03 ceph-mon[50536]: purged_snaps scrub ok
2026-03-10T09:47:49.834 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:49 vm03 ceph-mon[50536]: osdmap e31: 7 total, 6 up, 7 in
2026-03-10T09:47:49.834 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:49 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: pgmap v63: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: Detected new or changed devices on vm00
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: osdmap e32: 7 total, 6 up, 7 in
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:47:50.794 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:50 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: pgmap v63: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: Detected new or changed devices on vm00
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: osdmap e32: 7 total, 6 up, 7 in
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: pgmap v63: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: Detected new or changed devices on vm00
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: osdmap e32: 7 total, 6 up, 7 in
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:47:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:50 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:51.715 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:51 vm03 ceph-mon[50536]: Deploying daemon osd.6 on vm03
2026-03-10T09:47:51.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:51 vm00 ceph-mon[52384]: Deploying daemon osd.6 on vm03
2026-03-10T09:47:51.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:51 vm00 ceph-mon[56720]: Deploying daemon osd.6 on vm03
2026-03-10T09:47:52.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:52 vm03 ceph-mon[50536]: pgmap v65: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail
2026-03-10T09:47:52.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:52 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:52.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:52 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:52.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:52 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:52.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:52 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:52.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:52 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[52384]: pgmap v65: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[56720]: pgmap v65: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:47:52.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:52 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y'
2026-03-10T09:47:53.111 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 6 on host 'vm03'
2026-03-10T09:47:53.160 DEBUG:teuthology.orchestra.run.vm03:osd.6> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.6.service
2026-03-10T09:47:53.161 INFO:tasks.cephadm:Deploying osd.7 on vm03 with /dev/vdb...
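Note that all three mons journal the same audit and map events, so any single stream is enough to follow cluster state; the useful signal in this span is the osdmap epoch marching from e23 (5 OSDs) to e33 (7 OSDs) as each device is added. One way to pull that epoch history out of a saved copy of this log (the filename teuthology.log is a placeholder):

    grep -Eo 'osdmap e[0-9]+: [0-9]+ total, [0-9]+ up, [0-9]+ in' teuthology.log | sort -u -V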
2026-03-10T09:47:53.161 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- lvm zap /dev/vdb 2026-03-10T09:47:53.839 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T09:47:53.855 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch daemon add osd vm03:/dev/vdb 2026-03-10T09:47:54.061 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:47:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[59401]: 2026-03-10T09:47:54.019+0000 7f5aef0903c0 -1 osd.6 0 log_to_monitors true 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[52384]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='mgr.14152 
192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:54.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:54 vm00 ceph-mon[56720]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:54.391 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:54 vm03 ceph-mon[50536]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:47:55 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[59401]: 2026-03-10T09:47:55.110+0000 7f5ae5a93700 -1 osd.6 0 waiting for initial osdmap 2026-03-10T09:47:55.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:47:55 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[59401]: 2026-03-10T09:47:55.125+0000 7f5ae242e700 -1 osd.6 34 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T09:47:55.548 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='client.24253 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/3717064126' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]': finished 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: osdmap e34: 8 total, 6 up, 8 in 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:55.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:55 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='client.24253 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/3717064126' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]': finished 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: osdmap e34: 8 total, 6 up, 8 in 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='client.24253 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='client.? 
192.168.123.103:0/3717064126' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290]' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4c157a84-f3cc-493a-ae93-8da2f0c9dd62"}]': finished 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: osdmap e34: 8 total, 6 up, 8 in 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:55 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[52384]: osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290] boot 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[52384]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[52384]: pgmap v70: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 1/6 objects misplaced (16.667%); 13 KiB/s, 0 objects/s recovering 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[52384]: from='client.? 
192.168.123.103:0/607155769' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[56720]: osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290] boot 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[56720]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[56720]: pgmap v70: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 1/6 objects misplaced (16.667%); 13 KiB/s, 0 objects/s recovering 2026-03-10T09:47:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:56 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/607155769' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:56 vm03 ceph-mon[50536]: osd.6 [v2:192.168.123.103:6816/3652588290,v1:192.168.123.103:6817/3652588290] boot 2026-03-10T09:47:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:56 vm03 ceph-mon[50536]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T09:47:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:47:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:56 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:56 vm03 ceph-mon[50536]: pgmap v70: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 1/6 objects misplaced (16.667%); 13 KiB/s, 0 objects/s recovering 2026-03-10T09:47:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:56 vm03 ceph-mon[50536]: from='client.? 
192.168.123.103:0/607155769' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[52384]: purged_snaps scrub starts 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[52384]: purged_snaps scrub ok 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[52384]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[56720]: purged_snaps scrub starts 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[56720]: purged_snaps scrub ok 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[56720]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T09:47:57.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:57 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:57.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:57 vm03 ceph-mon[50536]: purged_snaps scrub starts 2026-03-10T09:47:57.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:57 vm03 ceph-mon[50536]: purged_snaps scrub ok 2026-03-10T09:47:57.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:57 vm03 ceph-mon[50536]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T09:47:57.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:57 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:58.536 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:58 vm03 ceph-mon[50536]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T09:47:58.536 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:58 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:58.536 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:58 vm03 ceph-mon[50536]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:58 vm00 ceph-mon[52384]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T09:47:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:58 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:58 vm00 ceph-mon[52384]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:58 vm00 ceph-mon[56720]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T09:47:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:58 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:47:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:58 vm00 ceph-mon[56720]: pgmap v73: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB 
/ 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:59.350 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:47:58] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: Deploying daemon osd.7 on vm03 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: Detected new or changed devices on vm03 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:59.537 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: pgmap v74: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:59.538 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:47:59 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: Deploying daemon osd.7 on vm03 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: Detected new or changed devices on vm03 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 
192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: pgmap v74: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: Deploying daemon osd.7 on vm03 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: Detected new or changed devices on vm03 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: pgmap v74: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:47:59 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:47:59.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:47:59 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:47:59] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: Detected new or changed 
devices on vm03 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: pgmap v75: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:48:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:00 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: Detected new or changed devices on vm03 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: pgmap v75: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: Detected new or changed devices on vm03 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: 
from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:00.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:00.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: pgmap v75: 1 pgs: 1 active+recovering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 1/6 objects misplaced (16.667%) 2026-03-10T09:48:00.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:00 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.083 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 7 on host 'vm03' 2026-03-10T09:48:02.150 DEBUG:teuthology.orchestra.run.vm03:osd.7> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7.service 2026-03-10T09:48:02.152 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-10T09:48:02.152 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd stat -f json 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": 
"config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:48:02.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:02 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.607 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": 
"auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:48:02.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:02 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:03.082 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":37,"num_osds":8,"num_up_osds":7,"osd_up_since":1773136075,"num_in_osds":8,"osd_in_since":1773136075,"num_remapped_pgs":0} 2026-03-10T09:48:03.351 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:03 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3094804885' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:03.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:03 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3094804885' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:03.735 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:03 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/3094804885' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:04.048 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 09:48:03 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[62139]: 2026-03-10T09:48:03.733+0000 7f19005f63c0 -1 osd.7 0 log_to_monitors true 2026-03-10T09:48:04.083 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd stat -f json 2026-03-10T09:48:04.768 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:04 vm00 ceph-mon[52384]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 44 KiB/s, 0 objects/s recovering 2026-03-10T09:48:04.768 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:04 vm00 ceph-mon[52384]: from='osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T09:48:04.768 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:04 vm00 ceph-mon[52384]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T09:48:04.769 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:04 vm00 ceph-mon[56720]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 44 KiB/s, 0 objects/s recovering 2026-03-10T09:48:04.769 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:04 vm00 ceph-mon[56720]: from='osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T09:48:04.769 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:04 vm00 ceph-mon[56720]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T09:48:04.769 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:04.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:04 vm03 ceph-mon[50536]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 44 KiB/s, 0 objects/s recovering 2026-03-10T09:48:04.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:04 vm03 ceph-mon[50536]: from='osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T09:48:04.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:04 vm03 ceph-mon[50536]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T09:48:05.512 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":38,"num_osds":8,"num_up_osds":7,"osd_up_since":1773136075,"num_in_osds":8,"osd_in_since":1773136075,"num_remapped_pgs":0} 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[52384]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[52384]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[52384]: from='osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[52384]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2726309334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[56720]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[56720]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[56720]: from='osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[56720]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:48:05.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:05 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/2726309334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:06.047 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 09:48:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[62139]: 2026-03-10T09:48:05.660+0000 7f18f6ff9700 -1 osd.7 0 waiting for initial osdmap 2026-03-10T09:48:06.048 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 09:48:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[62139]: 2026-03-10T09:48:05.669+0000 7f18f3994700 -1 osd.7 39 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:48:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:05 vm03 ceph-mon[50536]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T09:48:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:05 vm03 ceph-mon[50536]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T09:48:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:05 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:05 vm03 ceph-mon[50536]: from='osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:48:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:05 vm03 ceph-mon[50536]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:48:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:05 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/2726309334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:06.512 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd stat -f json 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[52384]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 40 KiB/s, 0 objects/s recovering 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[52384]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[52384]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[56720]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 40 KiB/s, 0 objects/s recovering 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[56720]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[56720]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.789 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:06 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:06.958 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:07.019 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":40,"num_osds":8,"num_up_osds":8,"osd_up_since":1773136086,"num_in_osds":8,"osd_in_since":1773136075,"num_remapped_pgs":1} 2026-03-10T09:48:07.019 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd dump --format=json 2026-03-10T09:48:07.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:06 vm03 ceph-mon[50536]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 40 KiB/s, 0 
objects/s recovering 2026-03-10T09:48:07.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:06 vm03 ceph-mon[50536]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T09:48:07.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:06 vm03 ceph-mon[50536]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T09:48:07.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:07.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:07.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:06 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:07.163 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:07.483 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:07.483 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":40,"fsid":"e2d4b2ee-1c65-11f1-bae0-b525704df8fa","created":"2026-03-10T09:46:02.779534+0000","modified":"2026-03-10T09:48:06.665497+0000","last_up_change":"2026-03-10T09:48:06.665497+0000","last_in_change":"2026-03-10T09:47:55.097554+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T09:47:25.514600+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"applica
tion_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"74379a85-5860-4155-922a-ce2adedc2262","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":39,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6803","nonce":398256759}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6805","nonce":398256759}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6809","nonce":398256759}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6807","nonce":398256759}]},"public_addr":"192.168.123.100:6803/398256759","cluster_addr":"192.168.123.100:6805/398256759","heartbeat_back_addr":"192.168.123.100:6809/398256759","heartbeat_front_addr":"192.168.123.100:6807/398256759","state":["exists","up"]},{"osd":1,"uuid":"718beebe-c05a-490a-835a-00fdd797508b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6811","nonce":1400892823}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6813","nonce":1400892823}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6817","nonce":1400892823}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6815","nonce":1400892823}]},"public_addr":"192.168.123.100:6811/1400892823","cluster_addr":"192.168.123.100:6813/1400892823","heartbeat_back_addr":"192.168.123.100:6817/1400892823","heartbeat_front_addr":"192.168.123.100:6815/1400892823","state":["exists","up"]},{"osd":2,"uuid":"b8c6c746-4058-4653-b11b-b1cb8e4cd332","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":15,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6819","nonce":2605451956}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6821","nonce":2605451956}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6825","nonce":2605451956}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6823","nonce":2605451956}]},"public_addr":"192.168.123.100:6819/2605451956","cluster_addr":"192.168.123.100:6821/2605451956","heartbeat_back_addr":"192.168.123.100:6825/2605451956","heartbeat_front_addr":"192.168.123.100:6823/2605451956","state":["exists","up"]},{"osd":3,"uuid":"c20101af-d2e8-44bf-8684-17f240235e25","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6827","nonce":3956173294
}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6829","nonce":3956173294}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6833","nonce":3956173294}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6831","nonce":3956173294}]},"public_addr":"192.168.123.100:6827/3956173294","cluster_addr":"192.168.123.100:6829/3956173294","heartbeat_back_addr":"192.168.123.100:6833/3956173294","heartbeat_front_addr":"192.168.123.100:6831/3956173294","state":["exists","up"]},{"osd":4,"uuid":"fa9a846e-614f-4e56-875b-56d674cbe31c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6801","nonce":83821511}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6803","nonce":83821511}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6807","nonce":83821511}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6805","nonce":83821511}]},"public_addr":"192.168.123.103:6801/83821511","cluster_addr":"192.168.123.103:6803/83821511","heartbeat_back_addr":"192.168.123.103:6807/83821511","heartbeat_front_addr":"192.168.123.103:6805/83821511","state":["exists","up"]},{"osd":5,"uuid":"f2832919-d86c-4c2f-b301-84a2776e8ec6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":30,"up_thru":31,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6809","nonce":3474606609}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6811","nonce":3474606609}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6815","nonce":3474606609}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6813","nonce":3474606609}]},"public_addr":"192.168.123.103:6809/3474606609","cluster_addr":"192.168.123.103:6811/3474606609","heartbeat_back_addr":"192.168.123.103:6815/3474606609","heartbeat_front_addr":"192.168.123.103:6813/3474606609","state":["exists","up"]},{"osd":6,"uuid":"310b60d4-374c-41a3-be5a-54d72a8a8262","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6817","nonce":3652588290}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6819","nonce":3652588290}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6823","nonce":3652588290}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.
123.103:6820","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6821","nonce":3652588290}]},"public_addr":"192.168.123.103:6817/3652588290","cluster_addr":"192.168.123.103:6819/3652588290","heartbeat_back_addr":"192.168.123.103:6823/3652588290","heartbeat_front_addr":"192.168.123.103:6821/3652588290","state":["exists","up"]},{"osd":7,"uuid":"4c157a84-f3cc-493a-ae93-8da2f0c9dd62","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6825","nonce":3293964311}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6827","nonce":3293964311}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6831","nonce":3293964311}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6829","nonce":3293964311}]},"public_addr":"192.168.123.103:6825/3293964311","cluster_addr":"192.168.123.103:6827/3293964311","heartbeat_back_addr":"192.168.123.103:6831/3293964311","heartbeat_front_addr":"192.168.123.103:6829/3293964311","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:08.075727+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:16.061623+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:23.727100+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:30.891897+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:39.098208+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:47.199096+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:55.070496+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"0.000000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[{"pgid":"1.0","osds":[0,6,1]}],"primary_temp":[],"blocklist":{"192.168.123.100:6801/254974794":"2026-03-11T09:46:30.848027+0000","192.168.123.100:6800/254974794":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/2863919683":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/3022761531":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/1870828082":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/1008100373":"2026-03-11T09:46:19.963160+0000","192.168.123.100:6800/3678185549":"2026-03-11T09:46:19.963160+0000","192.168.123.100:0/607966756"
:"2026-03-11T09:46:19.963160+0000","192.168.123.100:6801/3678185549":"2026-03-11T09:46:19.963160+0000","192.168.123.100:0/2045903313":"2026-03-11T09:46:19.963160+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T09:48:07.530 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T09:47:25.514600+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '18', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-10T09:48:07.530 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd pool get .mgr pg_num 2026-03-10T09:48:07.678 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: purged_snaps scrub starts 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: purged_snaps scrub ok 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311] boot 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: 
from='client.? 192.168.123.100:0/3528339169' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1855795634' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: purged_snaps scrub starts 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: purged_snaps scrub ok 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311] boot 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3528339169' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:07.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:07 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1855795634' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:08.029 INFO:teuthology.orchestra.run.vm00.stdout:pg_num: 1 2026-03-10T09:48:08.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: purged_snaps scrub starts 2026-03-10T09:48:08.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: purged_snaps scrub ok 2026-03-10T09:48:08.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: osd.7 [v2:192.168.123.103:6824/3293964311,v1:192.168.123.103:6825/3293964311] boot 2026-03-10T09:48:08.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T09:48:08.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:08.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3528339169' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T09:48:08.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:07 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1855795634' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:08.097 INFO:tasks.cephadm:Adding prometheus.a on vm03 2026-03-10T09:48:08.097 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch apply prometheus '1;vm03=a' 2026-03-10T09:48:08.556 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled prometheus update... 
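At this point the osdmap has reached epoch 40 with all 8 OSDs up and in, and the harness has just confirmed that the .mgr pool still has pg_num 1 (its autoscaler mode is off in the dump above). The same two checks can be reproduced from a cephadm shell; a sketch, reusing the fsid and image from this run:

  sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
    --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- \
    bash -c 'ceph osd stat; ceph osd pool get .mgr pg_num'

The prometheus daemon is then scheduled with the placement string '1;vm03=a', teuthology's count;host=daemon-id shorthand (the mon audit log below records it as "placement vm03=a;count:1"). Note that "Scheduled prometheus update..." only means the service spec was saved: the mgr deploys the daemon asynchronously, which is why the harness immediately attaches journalctl to the new unit. The closest manual equivalent, minus the ability to pin the daemon id to "a", would be:

  ceph orch apply prometheus --placement='1 vm03'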
2026-03-10T09:48:08.600 DEBUG:teuthology.orchestra.run.vm03:prometheus.a> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service 2026-03-10T09:48:08.602 INFO:tasks.cephadm:Adding node-exporter.a on vm00 2026-03-10T09:48:08.602 INFO:tasks.cephadm:Adding node-exporter.b on vm03 2026-03-10T09:48:08.602 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch apply node-exporter '2;vm00=a;vm03=b' 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3944644797' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:08.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:08 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T09:48:09.103 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled node-exporter update... 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/3944644797' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T09:48:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/3944644797' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:09.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:08 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T09:48:09.163 DEBUG:teuthology.orchestra.run.vm00:node-exporter.a> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.a.service 2026-03-10T09:48:09.164 DEBUG:teuthology.orchestra.run.vm03:node-exporter.b> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service 2026-03-10T09:48:09.166 INFO:tasks.cephadm:Adding alertmanager.a on vm00 2026-03-10T09:48:09.166 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch apply alertmanager '1;vm00=a' 2026-03-10T09:48:09.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[52384]: from='client.24278 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm03=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:09.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[52384]: Saving service prometheus spec with placement vm03=a;count:1 2026-03-10T09:48:09.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[52384]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T09:48:09.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:09.901 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ignoring --setuser ceph since I am not root 2026-03-10T09:48:09.901 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ignoring --setgroup ceph since I am not root 2026-03-10T09:48:09.902 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:09.890+0000 7f28c19d7000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:09 vm03 ceph-mon[50536]: from='client.24278 -' entity='client.admin' cmd=[{"prefix": "orch 
apply", "service_type": "prometheus", "placement": "1;vm03=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:09 vm03 ceph-mon[50536]: Saving service prometheus spec with placement vm03=a;count:1 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:09 vm03 ceph-mon[50536]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:09 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ignoring --setuser ceph since I am not root 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ignoring --setgroup ceph since I am not root 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:09.878+0000 7f016eb67000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:48:10.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:09.934+0000 7f016eb67000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:48:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[56720]: from='client.24278 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm03=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[56720]: Saving service prometheus spec with placement vm03=a;count:1 2026-03-10T09:48:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[56720]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T09:48:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:09 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:10.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:09.958+0000 7f28c19d7000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:48:10.720 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:10.348+0000 7f016eb67000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:48:10.720 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:10.719+0000 7f016eb67000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:48:10.759 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:10.387+0000 7f28c19d7000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:48:11.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:10 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T09:48:11.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:10 vm03 ceph-mon[50536]: mgrmap e16: y(active, since 98s), standbys: x 2026-03-10T09:48:11.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 
09:48:10 vm03 ceph-mon[50536]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:10.862+0000 7f016eb67000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:48:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:10.915+0000 7f016eb67000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:10 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:10 vm00 ceph-mon[52384]: mgrmap e16: y(active, since 98s), standbys: x 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:10 vm00 ceph-mon[52384]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:10 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:10 vm00 ceph-mon[56720]: mgrmap e16: y(active, since 98s), standbys: x 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:10 vm00 ceph-mon[56720]: from='mgr.14152 192.168.123.100:0/2616060411' entity='mgr.y' 2026-03-10T09:48:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:10.758+0000 7f28c19d7000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:48:11.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:10.924+0000 7f28c19d7000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:48:11.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:10.990+0000 7f28c19d7000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:48:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:11.111+0000 7f016eb67000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:48:11.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:11.183+0000 7f28c19d7000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:48:11.998 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:11.738+0000 7f016eb67000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:48:11.999 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:11.936+0000 7f016eb67000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:48:12.078 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:11.819+0000 7f28c19d7000 -1 
mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:48:12.078 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.016+0000 7f28c19d7000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:48:12.297 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:11.997+0000 7f016eb67000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:48:12.330 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:12.051+0000 7f016eb67000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:48:12.330 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:12.112+0000 7f016eb67000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:48:12.330 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:12.168+0000 7f016eb67000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:48:12.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.078+0000 7f28c19d7000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:48:12.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.132+0000 7f28c19d7000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:48:12.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.196+0000 7f28c19d7000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:48:12.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.254+0000 7f28c19d7000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:48:12.797 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:12.473+0000 7f016eb67000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:48:12.854 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:12.543+0000 7f016eb67000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:48:12.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.574+0000 7f28c19d7000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:48:12.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:12.652+0000 7f28c19d7000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:48:13.395 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.109+0000 7f016eb67000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:48:13.395 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.173+0000 7f016eb67000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:48:13.395 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.255+0000 7f016eb67000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:48:13.525 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.233+0000 7f28c19d7000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:48:13.525 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.305+0000 7f28c19d7000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:48:13.525 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.386+0000 7f28c19d7000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:48:13.682 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.394+0000 7f016eb67000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:48:13.682 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.465+0000 7f016eb67000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:48:13.682 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.583+0000 7f016eb67000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:48:13.786 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.525+0000 7f28c19d7000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:48:13.786 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.595+0000 7f28c19d7000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:48:13.786 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.695+0000 7f28c19d7000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:48:14.007 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:13.681+0000 7f016eb67000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:48:14.110 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:13.785+0000 7f28c19d7000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:48:14.297 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:14.007+0000 7f016eb67000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:14.074+0000 7f016eb67000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 
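The long run of "Module <name> has missing NOTIFY_TYPES member" lines above is noise, not failure: after the prometheus module is enabled, both mgr daemons respawn (see "Standby manager daemon x restarted" and "Active manager daemon y restarted" in the mon journals just below) and re-import every bundled module, and the v17.2.0 module loader emits one warning per module that does not declare a NOTIFY_TYPES list. Each module therefore appears exactly twice, once per mgr. To confirm the modules still loaded and see which are enabled, from a cephadm shell:

  ceph mgr module ls --format json-pretty | head -n 20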
2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: [10/Mar/2026:09:48:14] ENGINE Bus STARTING 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: CherryPy Checker: 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: The Application mounted at '' has an empty config. 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: [10/Mar/2026:09:48:14] ENGINE Serving on http://:::9283 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: [10/Mar/2026:09:48:14] ENGINE Bus STARTED 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:14 vm03 ceph-mon[50536]: Standby manager daemon x restarted 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:14 vm03 ceph-mon[50536]: Standby manager daemon x started 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:14 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:14 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:14 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:48:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:14 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[52384]: Standby manager daemon x restarted 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[52384]: Standby manager daemon x started 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[52384]: from='mgr.? 
192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[56720]: Standby manager daemon x restarted 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[56720]: Standby manager daemon x started 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:14 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/1643143688' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:14.109+0000 7f28c19d7000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:48:14.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:14.178+0000 7f28c19d7000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:48:15.420 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:15 vm03 ceph-mon[50536]: mgrmap e17: y(active, since 103s), standbys: x 2026-03-10T09:48:15.420 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:15 vm03 ceph-mon[50536]: Active manager daemon y restarted 2026-03-10T09:48:15.420 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:15 vm03 ceph-mon[50536]: Activating manager daemon y 2026-03-10T09:48:15.420 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:15 vm03 ceph-mon[50536]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:48:15] ENGINE Bus STARTING 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: CherryPy Checker: 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: The Application mounted at '' has an empty config. 
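The config-key get probes above are the respawned standby mgr x hunting for dashboard TLS material: it tries the per-daemon pair (mgr/dashboard/x/crt and mgr/dashboard/x/key) and then the cluster-wide pair (mgr/dashboard/crt and mgr/dashboard/key) in the mons' config-key store. To list what is actually stashed there without echoing the key material itself:

  ceph config-key ls | grep mgr/dashboard

(config-key ls prints key names only; ceph config-key dump would also print the values, which for crt/key pairs is rarely what you want in a terminal log.)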
2026-03-10T09:48:15.450 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:48:15] ENGINE Serving on http://:::9283 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:48:15] ENGINE Bus STARTED 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[56720]: mgrmap e17: y(active, since 103s), standbys: x 2026-03-10T09:48:15.450 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[56720]: Active manager daemon y restarted 2026-03-10T09:48:15.451 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[56720]: Activating manager daemon y 2026-03-10T09:48:15.451 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[56720]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T09:48:15.451 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[52384]: mgrmap e17: y(active, since 103s), standbys: x 2026-03-10T09:48:15.451 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[52384]: Active manager daemon y restarted 2026-03-10T09:48:15.451 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[52384]: Activating manager daemon y 2026-03-10T09:48:15.451 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:15 vm00 ceph-mon[52384]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T09:48:15.705 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:48:15] ENGINE Bus STARTING 2026-03-10T09:48:16.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:48:15] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:48:16.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:48:15] ENGINE Bus STARTED 2026-03-10T09:48:16.203 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled alertmanager update... 
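Two CherryPy endpoints come up as mgr y reactivates: the prometheus exporter on port 9283 ("http://:::9283" is just IPv6 any-address notation, plain HTTP) and the dashboard on https://192.168.123.100:7150. Assuming vm00 is reachable from wherever you are debugging, a minimal smoke test would be:

  curl -s  http://192.168.123.100:9283/metrics | head -n 5
  curl -sk https://192.168.123.100:7150/ -o /dev/null -w '%{http_code}\n'

Only the active mgr serves real metrics on 9283; a standby's exporter typically answers with an empty body or a redirect to the active instance.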
2026-03-10T09:48:16.246 DEBUG:teuthology.orchestra.run.vm00:alertmanager.a> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@alertmanager.a.service 2026-03-10T09:48:16.248 INFO:tasks.cephadm:Adding grafana.a on vm03 2026-03-10T09:48:16.248 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph orch apply grafana '1;vm03=a' 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: mgrmap e18: y(active, starting, since 0.982174s), standbys: x 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:48:16.425 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: Manager daemon y is now available 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:48:15] ENGINE Bus STARTING 2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 
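"Manager daemon y is now available" marks the end of the failover dance, and the orchestrator immediately runs a reconciliation pass: it re-reads metadata for every mon, mgr and osd, regenerates the minimal client config and admin keyring (config generate-minimal-conf, auth get client.admin), pushes them to both hosts ("Updating vm00:/etc/ceph/ceph.conf" and friends), and removes the per-host osd_memory_target overrides it manages. Whether any of those overrides survive can be checked with:

  ceph config dump | grep osd_memory_target

and the exact minimal conf cephadm distributes can be reproduced by running ceph config generate-minimal-conf yourself.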
2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:48:16.425 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:48:15] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T09:48:16.426 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:48:15] ENGINE Bus STARTED
2026-03-10T09:48:16.426 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.426 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.426 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.426 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: mgrmap e18: y(active, starting, since 0.982174s), standbys: x
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: Manager daemon y is now available
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:48:15] ENGINE Bus STARTING
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:48:15] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:48:15] ENGINE Bus STARTED
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: mgrmap e18: y(active, starting, since 0.982174s), standbys: x
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:48:16.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: Manager daemon y is now available
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:48:15] ENGINE Bus STARTING
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:48:15] ENGINE Serving on https://192.168.123.100:7150
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:48:15] ENGINE Bus STARTED
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:16.716 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled grafana update...
2026-03-10T09:48:16.790 DEBUG:teuthology.orchestra.run.vm03:grafana.a> sudo journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@grafana.a.service
2026-03-10T09:48:16.791 INFO:tasks.cephadm:Setting up client nodes...
2026-03-10T09:48:16.791 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-10T09:48:17.352 INFO:teuthology.orchestra.run.vm00.stdout:[client.0]
2026-03-10T09:48:17.352 INFO:teuthology.orchestra.run.vm00.stdout: key = AQDh6K9panBqExAA2I5LBLVquUucaoAtFJZ1TA==
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: Deploying daemon alertmanager.a on vm00
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: mgrmap e19: y(active, since 2s), standbys: x
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: from='client.24290 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm00=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: Saving service alertmanager spec with placement vm00=a;count:1
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: from='client.24323 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm03=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: Saving service grafana spec with placement vm03=a;count:1
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:17.352 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: Deploying daemon alertmanager.a on vm00
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: mgrmap e19: y(active, since 2s), standbys: x
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: from='client.24290 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm00=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: Saving service alertmanager spec with placement vm00=a;count:1
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: from='client.24323 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm03=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: Saving service grafana spec with placement vm03=a;count:1
2026-03-10T09:48:17.353 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:17 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:17.401 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T09:48:17.401 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.0.keyring
2026-03-10T09:48:17.401 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring
2026-03-10T09:48:17.442 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-10T09:48:17.465 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: Deploying daemon alertmanager.a on vm00
2026-03-10T09:48:17.465 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: mgrmap e19: y(active, since 2s), standbys: x
2026-03-10T09:48:17.465 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: from='client.24290 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm00=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:17.465 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: Saving service alertmanager spec with placement vm00=a;count:1
2026-03-10T09:48:17.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:17.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:17.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: from='client.24323 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm03=a", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:17.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: Saving service grafana spec with placement vm03=a;count:1
2026-03-10T09:48:17.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:17 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:17.972 INFO:teuthology.orchestra.run.vm03.stdout:[client.1]
2026-03-10T09:48:17.972 INFO:teuthology.orchestra.run.vm03.stdout: key = AQDh6K9p9OaiORAATnl++XMlpmuIFqWLArVKDQ==
2026-03-10T09:48:18.044 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-10T09:48:18.044 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.1.keyring
2026-03-10T09:48:18.044 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring
2026-03-10T09:48:18.119 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
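The records above capture the client-provisioning recipe teuthology uses: run ceph auth get-or-create client.<id> inside a cephadm shell, stream the keyring that comes back on stdout into /etc/ceph/ceph.client.<id>.keyring with dd, then chmod it to 0644. A minimal local sketch of the same flow in Python (an illustration, not teuthology's code: it assumes root privileges, a cephadm binary on PATH, and the image/fsid of this run; teuthology itself drives these steps over SSH):

    import os
    import subprocess

    # Values taken from the run above; adjust for another cluster.
    FSID = "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"
    IMAGE = "quay.io/ceph/ceph:v17.2.0"

    def provision_client(client_id: int) -> None:
        """Mirror the keyring setup recorded in the log: mint, write, chmod."""
        # Step 1: 'ceph auth get-or-create' inside a cephadm shell prints the keyring.
        keyring = subprocess.check_output(
            ["cephadm", "--image", IMAGE, "shell",
             "-c", "/etc/ceph/ceph.conf",
             "-k", "/etc/ceph/ceph.client.admin.keyring",
             "--fsid", FSID, "--",
             "ceph", "auth", "get-or-create", f"client.{client_id}",
             "mon", "allow *", "osd", "allow *",
             "mds", "allow *", "mgr", "allow *"],
            text=True,
        )
        # Steps 2-3: write the keyring where clients look for it (the log uses
        # 'sudo dd of=...' for this) and relax permissions to 0644.
        path = f"/etc/ceph/ceph.client.{client_id}.keyring"
        with open(path, "w") as f:
            f.write(keyring)
        os.chmod(path, 0o644)

    if __name__ == "__main__":
        for cid in (0, 1):  # client.0 lives on vm00, client.1 on vm03 in this run
            provision_client(cid)
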
2026-03-10T09:48:18.119 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available
2026-03-10T09:48:18.120 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph mgr dump --format=json
2026-03-10T09:48:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:18 vm03 ceph-mon[50536]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:18 vm03 ceph-mon[50536]: mgrmap e20: y(active, since 3s), standbys: x
2026-03-10T09:48:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:18 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/185507242' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T09:48:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:18 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/185507242' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T09:48:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:18 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/3568047881' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T09:48:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:18 vm03 ceph-mon[50536]: from='client.? 192.168.123.103:0/3568047881' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T09:48:18.373 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[52384]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[52384]: mgrmap e20: y(active, since 3s), standbys: x
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/185507242' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/185507242' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/3568047881' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[52384]: from='client.? 192.168.123.103:0/3568047881' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[56720]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:18.409 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[56720]: mgrmap e20: y(active, since 3s), standbys: x
2026-03-10T09:48:18.410 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/185507242' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T09:48:18.410 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/185507242' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T09:48:18.410 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/3568047881' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-10T09:48:18.410 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:18 vm00 ceph-mon[56720]: from='client.? 192.168.123.103:0/3568047881' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-10T09:48:18.884 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T09:48:18.975 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":20,"active_gid":24302,"active_name":"y","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6800","nonce":99749656},{"type":"v1","addr":"192.168.123.100:6801","nonce":99749656}]},"active_addr":"192.168.123.100:6801/99749656","active_change":"2026-03-10T09:48:14.181303+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":24296,"name":"x","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the 
number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of 
cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this option can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.100:8443/","prometheus":"http://192.168.123.100:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":43,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":898935092}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":135643742}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":933298556}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":394930303}]}]}} 2026-03-10T09:48:18.976 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-10T09:48:18.976 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-10T09:48:18.976 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd dump --format=json 2026-03-10T09:48:19.259 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:19.388 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:19 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/948900859' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T09:48:19.388 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:19 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/948900859' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T09:48:19.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:19 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/948900859' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T09:48:19.951 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:19] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:19.951 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:19.894Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-10T09:48:20.037 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:20.038 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":43,"fsid":"e2d4b2ee-1c65-11f1-bae0-b525704df8fa","created":"2026-03-10T09:46:02.779534+0000","modified":"2026-03-10T09:48:14.180734+0000","last_up_change":"2026-03-10T09:48:06.665497+0000","last_in_change":"2026-03-10T09:47:55.097554+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T09:47:25.514600+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"74379a85-5860-4155-922a-ce2adedc2262","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6803","nonce":398256759}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6805","nonce":398256759}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6809","nonce":398256759}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6807","nonce":398256759}]},"public_addr":"192.168.123.100:6803/398256759","cluster_addr":"192.168.123.100:6805/398256759","heartbeat_back_addr":"192.168.123.100:6809/398256759","heartbeat_front_addr":"192.168.123.100:6807/398256759","state":["exists","up"]},{"osd":1,"uuid":"718beebe-c05a-490a-835a-00fdd797508b","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6811","nonce":1400892823}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6813","nonce":1400892823}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6817","nonce":1400892823}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6815","nonce":1400892823}]},"public_addr":"192.168.123.100:6811/1400892823","cluster_addr":"192.168.123.100:6813/1400892823","heartbeat_back_addr":"192.168.123.100:6817/1400892823","heartbeat_front_addr":"192.168.123.100:6815/1400892823","state":["exists","up"]},{"osd":2,"uuid":"b8c6c746-4058-4653-b11b-b1cb8e4cd332","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":15,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6819","nonce":2605451956}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6821","nonce":2605451956}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6825","nonce":2605451956}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6823","nonce":2605451956}]},"public_addr":"192.168.123.100:6819/2605451956","cluster_addr":"192.168.123.100:6821/2605451956","heartbeat_back_addr":"192.168.123.100:6825/2605451956","heartbeat_front_addr":"192.168.123.100:6823/2605451956","state":["exists","up"]},{"osd":3,"uuid":"c20101af-d2e8-44bf-8684-17f240235e25","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6827","nonce":3956173294}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6829","nonce":3956173294}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6833","nonce":3956173294}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6831","nonce":3956173294}]},"public_addr":"192.168.123.100:6827/3956173294","cluster_addr":"192.168.123.100:6829/3956173294","heartbeat_back_addr":"192.168.123.100:6833/3956173294","heartbeat_front_addr":"192.168.123.100:6831/3956173294","state":["exists","up"]},{"osd":4,"uuid":"fa9a846e-614f-4e56-875b-56d674cbe31c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6801","nonce":83821511}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":83821511},{"type":"v1","addr":"192.168.12
3.103:6803","nonce":83821511}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6807","nonce":83821511}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6805","nonce":83821511}]},"public_addr":"192.168.123.103:6801/83821511","cluster_addr":"192.168.123.103:6803/83821511","heartbeat_back_addr":"192.168.123.103:6807/83821511","heartbeat_front_addr":"192.168.123.103:6805/83821511","state":["exists","up"]},{"osd":5,"uuid":"f2832919-d86c-4c2f-b301-84a2776e8ec6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":30,"up_thru":31,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6809","nonce":3474606609}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6811","nonce":3474606609}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6815","nonce":3474606609}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6813","nonce":3474606609}]},"public_addr":"192.168.123.103:6809/3474606609","cluster_addr":"192.168.123.103:6811/3474606609","heartbeat_back_addr":"192.168.123.103:6815/3474606609","heartbeat_front_addr":"192.168.123.103:6813/3474606609","state":["exists","up"]},{"osd":6,"uuid":"310b60d4-374c-41a3-be5a-54d72a8a8262","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6817","nonce":3652588290}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6819","nonce":3652588290}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6823","nonce":3652588290}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6821","nonce":3652588290}]},"public_addr":"192.168.123.103:6817/3652588290","cluster_addr":"192.168.123.103:6819/3652588290","heartbeat_back_addr":"192.168.123.103:6823/3652588290","heartbeat_front_addr":"192.168.123.103:6821/3652588290","state":["exists","up"]},{"osd":7,"uuid":"4c157a84-f3cc-493a-ae93-8da2f0c9dd62","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6825","nonce":3293964311}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6827","nonce":3293964311}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6831","nonce":3293964311}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6829","nonce":3293964311}]},"public_addr":"192.1
68.123.103:6825/3293964311","cluster_addr":"192.168.123.103:6827/3293964311","heartbeat_back_addr":"192.168.123.103:6831/3293964311","heartbeat_front_addr":"192.168.123.103:6829/3293964311","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:08.075727+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:16.061623+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:23.727100+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:30.891897+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:39.098208+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:47.199096+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:55.070496+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:48:04.774100+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:0/1641161112":"2026-03-11T09:48:14.180692+0000","192.168.123.100:6800/2246671766":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/2733987987":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/1578175387":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/2424658185":"2026-03-11T09:48:14.180692+0000","192.168.123.100:6801/254974794":"2026-03-11T09:46:30.848027+0000","192.168.123.100:6800/254974794":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/2863919683":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/3022761531":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/1870828082":"2026-03-11T09:46:30.848027+0000","192.168.123.100:6801/2246671766":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/1008100373":"2026-03-11T09:46:19.963160+0000","192.168.123.100:6800/3678185549":"2026-03-11T09:46:19.963160+0000","192.168.123.100:0/607966756":"2026-03-11T09:46:19.963160+0000","192.168.123.100:6801/3678185549":"2026-03-11T09:46:19.963160+0000","192.168.123.100:0/2045903313":"2026-03-11T09:46:19.963160+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T09:48:20.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:19.948Z caller=coordinator.go:113 
component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T09:48:20.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:19.948Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T09:48:20.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:19.951Z caller=main.go:518 msg=Listening address=:9093 2026-03-10T09:48:20.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:19.951Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-10T09:48:20.573 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-10T09:48:20.573 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd dump --format=json 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:20] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/2880570235' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.652 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2880570235' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:20 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:20.933 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/2880570235' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:20 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:21.745 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:21.745 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":43,"fsid":"e2d4b2ee-1c65-11f1-bae0-b525704df8fa","created":"2026-03-10T09:46:02.779534+0000","modified":"2026-03-10T09:48:14.180734+0000","last_up_change":"2026-03-10T09:48:06.665497+0000","last_in_change":"2026-03-10T09:47:55.097554+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T09:47:25.514600+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"74379a85-5860-4155-922a-ce2adedc2262","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6803","nonce":398256759}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6805","nonce":398256759}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123
.100:6808","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6809","nonce":398256759}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":398256759},{"type":"v1","addr":"192.168.123.100:6807","nonce":398256759}]},"public_addr":"192.168.123.100:6803/398256759","cluster_addr":"192.168.123.100:6805/398256759","heartbeat_back_addr":"192.168.123.100:6809/398256759","heartbeat_front_addr":"192.168.123.100:6807/398256759","state":["exists","up"]},{"osd":1,"uuid":"718beebe-c05a-490a-835a-00fdd797508b","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6811","nonce":1400892823}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6813","nonce":1400892823}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6817","nonce":1400892823}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":1400892823},{"type":"v1","addr":"192.168.123.100:6815","nonce":1400892823}]},"public_addr":"192.168.123.100:6811/1400892823","cluster_addr":"192.168.123.100:6813/1400892823","heartbeat_back_addr":"192.168.123.100:6817/1400892823","heartbeat_front_addr":"192.168.123.100:6815/1400892823","state":["exists","up"]},{"osd":2,"uuid":"b8c6c746-4058-4653-b11b-b1cb8e4cd332","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":15,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6819","nonce":2605451956}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6821","nonce":2605451956}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6825","nonce":2605451956}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":2605451956},{"type":"v1","addr":"192.168.123.100:6823","nonce":2605451956}]},"public_addr":"192.168.123.100:6819/2605451956","cluster_addr":"192.168.123.100:6821/2605451956","heartbeat_back_addr":"192.168.123.100:6825/2605451956","heartbeat_front_addr":"192.168.123.100:6823/2605451956","state":["exists","up"]},{"osd":3,"uuid":"c20101af-d2e8-44bf-8684-17f240235e25","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6827","nonce":3956173294}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6829","nonce":3956173294}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6833","nonce":3956173294}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3956173294},{"type":"v1","addr":"192.168.123.100:6831","nonce":3956173294}]},"public_addr":"192.168.123.100:6827/3956173294","cluster_addr":"192.168.123.100:6829/3956173294","heartbeat_back_
addr":"192.168.123.100:6833/3956173294","heartbeat_front_addr":"192.168.123.100:6831/3956173294","state":["exists","up"]},{"osd":4,"uuid":"fa9a846e-614f-4e56-875b-56d674cbe31c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6801","nonce":83821511}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6803","nonce":83821511}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6807","nonce":83821511}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":83821511},{"type":"v1","addr":"192.168.123.103:6805","nonce":83821511}]},"public_addr":"192.168.123.103:6801/83821511","cluster_addr":"192.168.123.103:6803/83821511","heartbeat_back_addr":"192.168.123.103:6807/83821511","heartbeat_front_addr":"192.168.123.103:6805/83821511","state":["exists","up"]},{"osd":5,"uuid":"f2832919-d86c-4c2f-b301-84a2776e8ec6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":30,"up_thru":31,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6809","nonce":3474606609}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6811","nonce":3474606609}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6815","nonce":3474606609}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3474606609},{"type":"v1","addr":"192.168.123.103:6813","nonce":3474606609}]},"public_addr":"192.168.123.103:6809/3474606609","cluster_addr":"192.168.123.103:6811/3474606609","heartbeat_back_addr":"192.168.123.103:6815/3474606609","heartbeat_front_addr":"192.168.123.103:6813/3474606609","state":["exists","up"]},{"osd":6,"uuid":"310b60d4-374c-41a3-be5a-54d72a8a8262","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6817","nonce":3652588290}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6819","nonce":3652588290}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6823","nonce":3652588290}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":3652588290},{"type":"v1","addr":"192.168.123.103:6821","nonce":3652588290}]},"public_addr":"192.168.123.103:6817/3652588290","cluster_addr":"192.168.123.103:6819/3652588290","heartbeat_back_addr":"192.168.123.103:6823/3652588290","heartbeat_front_addr":"192.168.123.103:6821/3652588290","state":["exists","up"]},{"osd":7,"uuid":"4c157a84-f3cc-493a-ae93-8da2f0c9dd62","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonc
e":3293964311},{"type":"v1","addr":"192.168.123.103:6825","nonce":3293964311}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6827","nonce":3293964311}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6831","nonce":3293964311}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":3293964311},{"type":"v1","addr":"192.168.123.103:6829","nonce":3293964311}]},"public_addr":"192.168.123.103:6825/3293964311","cluster_addr":"192.168.123.103:6827/3293964311","heartbeat_back_addr":"192.168.123.103:6831/3293964311","heartbeat_front_addr":"192.168.123.103:6829/3293964311","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:08.075727+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:16.061623+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:23.727100+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:30.891897+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:39.098208+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:47.199096+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:47:55.070496+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T09:48:04.774100+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:0/1641161112":"2026-03-11T09:48:14.180692+0000","192.168.123.100:6800/2246671766":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/2733987987":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/1578175387":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/2424658185":"2026-03-11T09:48:14.180692+0000","192.168.123.100:6801/254974794":"2026-03-11T09:46:30.848027+0000","192.168.123.100:6800/254974794":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/2863919683":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/3022761531":"2026-03-11T09:46:30.848027+0000","192.168.123.100:0/1870828082":"2026-03-11T09:46:30.848027+0000","192.168.123.100:6801/2246671766":"2026-03-11T09:48:14.180692+0000","192.168.123.100:0/1008100373":"2026-03-11T09:46:19.963160+0000","192.168.123.100:6800/3678185549":"2026-03-11T09:46:19.963160+0000","192.168.123.100:0/607966756":"2026-03-11T09:46:19.963160+0000","192.168.123.100:6801/3678185549":"2026-03-11T09:46:19.963160+0000","192.168.123.100:0/2045903313":"2026-03-11T09:46:19.963160+0000"},"erasure_code_profiles":{"default":{"cru
sh-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T09:48:22.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:21 vm00 ceph-mon[52384]: Deploying daemon node-exporter.a on vm00 2026-03-10T09:48:22.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:21 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:21.894Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000534925s 2026-03-10T09:48:22.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:21 vm00 ceph-mon[56720]: Deploying daemon node-exporter.a on vm00 2026-03-10T09:48:22.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:21 vm03 ceph-mon[50536]: Deploying daemon node-exporter.a on vm00 2026-03-10T09:48:24.054 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:23 vm00 ceph-mon[52384]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:24.054 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:23 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2231527001' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:24.054 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:23 vm00 ceph-mon[56720]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:24.054 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:23 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/2231527001' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.0 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.1 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.2 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.3 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.4 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.5 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.6 flush_pg_stats 2026-03-10T09:48:24.072 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph tell osd.7 flush_pg_stats 2026-03-10T09:48:24.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:23 vm03 ceph-mon[50536]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:24.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:23 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/2231527001' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T09:48:24.570 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:24 vm00 bash[71234]: Writing manifest to image destination 2026-03-10T09:48:24.869 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:24 vm00 podman[71234]: 2026-03-10 09:48:24.750674698 +0000 UTC m=+3.221387794 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-10T09:48:24.869 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:24 vm00 podman[71234]: 2026-03-10 09:48:24.779274815 +0000 UTC m=+3.249987901 container create dfbf18fec33b9f58e77312ca3031790315aae980c990470757b2b34365c1e890 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:25.126 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:25.126 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:25.184 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:24 vm03 ceph-mon[50536]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:25.242 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:25.267 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:25.275 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:24 vm00 ceph-mon[52384]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:25.275 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:24 vm00 ceph-mon[56720]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 podman[71234]: 2026-03-10 09:48:25.038298061 +0000 UTC m=+3.509011156 container init dfbf18fec33b9f58e77312ca3031790315aae980c990470757b2b34365c1e890 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 podman[71234]: 2026-03-10 09:48:25.041729684 +0000 UTC m=+3.512442770 container start dfbf18fec33b9f58e77312ca3031790315aae980c990470757b2b34365c1e890 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.044Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.044Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-10T09:48:25.275 
INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=node_exporter.go:115 level=info collector=arp 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.045Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=edac 
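
The flush_pg_stats fan-out recorded a little earlier (one 'ceph tell osd.N flush_pg_stats' per OSD, each issued through 'cephadm shell' at 09:48:24.072) is how the harness forces every OSD to publish fresh PG stats before it re-checks cluster health. A minimal Python sketch of the same loop, reusing the cephadm path, image, and fsid from this run; the values and the helper name are illustrative, not a teuthology API:

    import subprocess

    # Values copied from this run's log; adjust for another cluster (illustrative).
    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = "quay.io/ceph/ceph:v17.2.0"
    FSID = "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"

    def flush_pg_stats(osd_ids):
        # One 'ceph tell osd.N flush_pg_stats' per OSD, mirroring the log above.
        for osd in osd_ids:
            subprocess.run(
                ["sudo", CEPHADM, "--image", IMAGE, "shell", "--fsid", FSID,
                 "--", "ceph", "tell", f"osd.{osd}", "flush_pg_stats"],
                check=True,
            )

    flush_pg_stats(range(8))  # osd.0 through osd.7, as in this run

Each call returns once the OSD has been told to push its stats, so a subsequent PG query is more likely to reflect current state.
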
2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-10T09:48:25.275 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=os 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=stat 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=time 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 
level=info collector=udp_queues 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=uname 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.046Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[71725]: ts=2026-03-10T09:48:25.047Z caller=tls_config.go:195 level=info msg="TLS is disabled." http2=false 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 bash[71234]: dfbf18fec33b9f58e77312ca3031790315aae980c990470757b2b34365c1e890 2026-03-10T09:48:25.276 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:48:25 vm00 systemd[1]: Started Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:48:25.796 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:25.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:25 vm03 systemd[1]: Starting Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:48:25.812 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:26.224 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:25 vm03 bash[64445]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 
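At this point node-exporter.a is fully up: the unit logged "Listening on" address=:9100 and "TLS is disabled.", so a plain HTTP GET from the test runner should return the metrics page. A quick probe, assuming "vm00" resolves and is reachable from wherever this is run:

    import urllib.request

    # TLS is disabled per the tls_config.go line above, so plain HTTP is fine.
    with urllib.request.urlopen('http://vm00:9100/metrics', timeout=10) as resp:
        text = resp.read().decode('utf-8', 'replace')

    # node_exporter_build_info is a standard self-metric; expect True here.
    print('node_exporter_build_info' in text)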
2026-03-10T09:48:26.303 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:26.305 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:26.336 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:26 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:26.336 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:26 vm00 ceph-mon[52384]: Deploying daemon node-exporter.b on vm03 2026-03-10T09:48:26.336 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:26 vm00 ceph-mon[52384]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:26.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:26 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:26.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:26 vm00 ceph-mon[56720]: Deploying daemon node-exporter.b on vm03 2026-03-10T09:48:26.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:26 vm00 ceph-mon[56720]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:26 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:26 vm03 ceph-mon[50536]: Deploying daemon node-exporter.b on vm03 2026-03-10T09:48:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:26 vm03 ceph-mon[50536]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:26.963 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:26] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:27.257 INFO:teuthology.orchestra.run.vm00.stdout:171798691845 2026-03-10T09:48:27.257 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.7 2026-03-10T09:48:27.298 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:26 vm03 bash[64445]: Getting image source signatures 2026-03-10T09:48:27.298 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:26 vm03 bash[64445]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-10T09:48:27.298 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:26 vm03 bash[64445]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-10T09:48:27.298 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:26 vm03 bash[64445]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-10T09:48:27.538 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:27] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:27.577 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:27.761 INFO:teuthology.orchestra.run.vm00.stdout:128849018889 2026-03-10T09:48:27.761 DEBUG:teuthology.orchestra.run.vm00:> sudo 
/home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.5 2026-03-10T09:48:27.832 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:27 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:27.832 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:27 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:27.832 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:27 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:27.832 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:27 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:27.832 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:27 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:27.832 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:27 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:27.832 INFO:teuthology.orchestra.run.vm00.stdout:64424509454 2026-03-10T09:48:27.832 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.2 2026-03-10T09:48:28.032 INFO:teuthology.orchestra.run.vm00.stdout:107374182410 2026-03-10T09:48:28.032 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.4 2026-03-10T09:48:28.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:28.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:28.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 bash[64445]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 bash[64445]: Writing manifest to image destination 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 podman[64445]: 2026-03-10 09:48:27.685988845 +0000 UTC m=+1.860546510 container create e8ec5e902bbcbb6a4fd1de239a49dff725ab197735bc19bffeb99e4b04825c5b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 podman[64445]: 2026-03-10 09:48:27.719676515 +0000 UTC m=+1.894234191 container init e8ec5e902bbcbb6a4fd1de239a49dff725ab197735bc19bffeb99e4b04825c5b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 podman[64445]: 2026-03-10 09:48:27.723536289 +0000 UTC m=+1.898093954 container start 
e8ec5e902bbcbb6a4fd1de239a49dff725ab197735bc19bffeb99e4b04825c5b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 bash[64445]: e8ec5e902bbcbb6a4fd1de239a49dff725ab197735bc19bffeb99e4b04825c5b 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 podman[64445]: 2026-03-10 09:48:27.680008863 +0000 UTC m=+1.854566528 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 systemd[1]: Started Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.730Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.730Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.730Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.730Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=arp 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: 
ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-10T09:48:28.048 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=edac 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-10T09:48:28.049 
INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=os 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=stat 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=time 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=uname 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-10T09:48:28.049 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:48:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[64500]: ts=2026-03-10T09:48:27.731Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-10T09:48:28.374 INFO:teuthology.orchestra.run.vm00.stdout:34359738385 2026-03-10T09:48:28.374 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.0 2026-03-10T09:48:28.400 INFO:teuthology.orchestra.run.vm00.stdout:47244640271 2026-03-10T09:48:28.400 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.1 2026-03-10T09:48:28.608 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:28.631 INFO:teuthology.orchestra.run.vm00.stdout:90194313228 2026-03-10T09:48:28.631 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.3 2026-03-10T09:48:28.712 INFO:teuthology.orchestra.run.vm00.stdout:150323855367 2026-03-10T09:48:28.712 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph osd last-stat-seq osd.6 2026-03-10T09:48:28.762 INFO:teuthology.orchestra.run.vm00.stdout:171798691845 2026-03-10T09:48:28.907 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:28 vm00 ceph-mon[56720]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:28.907 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:28 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:28.908 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:28 vm00 ceph-mon[56720]: Deploying daemon prometheus.a on vm03 2026-03-10T09:48:28.908 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:28 vm00 ceph-mon[52384]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:28.908 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:28 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:28.908 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:28 vm00 ceph-mon[52384]: Deploying daemon prometheus.a on vm03 2026-03-10T09:48:29.021 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:28 vm03 ceph-mon[50536]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:29.022 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:28 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:29.022 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:28 vm03 ceph-mon[50536]: Deploying daemon prometheus.a on vm03 2026-03-10T09:48:29.070 INFO:tasks.cephadm.ceph_manager.ceph:need seq 171798691845 got 171798691845 for osd.7 2026-03-10T09:48:29.070 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:29.809 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:29.846 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:29 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/3659159451' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:29 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:29 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:29] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:29 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3659159451' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:29 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:29 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:29.875 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:29.881 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:29.886 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:29.925 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:30.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:29 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/3659159451' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T09:48:30.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:29 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:29 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.101 INFO:teuthology.orchestra.run.vm00.stdout:64424509454 2026-03-10T09:48:30.160 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:29.897Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.003538346s 2026-03-10T09:48:30.359 INFO:tasks.cephadm.ceph_manager.ceph:need seq 64424509454 got 64424509454 for osd.2 2026-03-10T09:48:30.359 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:30.419 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:30] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:30.681 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:30] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:30.984 INFO:teuthology.orchestra.run.vm00.stdout:128849018889 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: pgmap v10: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/382829877' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.984 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: pgmap v10: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/382829877' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:30.986 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: pgmap v10: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/382829877' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:31.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:31.117 INFO:teuthology.orchestra.run.vm00.stdout:150323855367 2026-03-10T09:48:31.155 INFO:tasks.cephadm.ceph_manager.ceph:need seq 128849018889 got 128849018889 for osd.5 2026-03-10T09:48:31.156 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:31.300 INFO:tasks.cephadm.ceph_manager.ceph:need seq 150323855367 got 150323855367 for osd.6 2026-03-10T09:48:31.300 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:31.304 INFO:teuthology.orchestra.run.vm00.stdout:34359738385 2026-03-10T09:48:31.321 INFO:teuthology.orchestra.run.vm00.stdout:90194313228 2026-03-10T09:48:31.367 INFO:teuthology.orchestra.run.vm00.stdout:107374182410 2026-03-10T09:48:31.394 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313228 got 90194313228 for osd.3 2026-03-10T09:48:31.395 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:31.441 INFO:teuthology.orchestra.run.vm00.stdout:47244640271 2026-03-10T09:48:31.462 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182410 got 107374182410 for osd.4 2026-03-10T09:48:31.462 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:31.471 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738385 got 34359738385 for osd.0 2026-03-10T09:48:31.471 DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:31.510 INFO:tasks.cephadm.ceph_manager.ceph:need seq 47244640271 got 47244640271 for osd.1 2026-03-10T09:48:31.510 
DEBUG:teuthology.parallel:result is None 2026-03-10T09:48:31.510 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-10T09:48:31.510 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph pg dump --format=json 2026-03-10T09:48:31.717 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:31.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2043358269' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T09:48:31.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1345178386' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T09:48:31.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1259692336' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T09:48:31.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/326145407' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T09:48:31.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3417373655' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T09:48:31.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3311007622' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2043358269' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1345178386' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1259692336' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/326145407' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3417373655' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3311007622' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2043358269' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/1345178386' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1259692336' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/326145407' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3417373655' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T09:48:31.859 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3311007622' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T09:48:32.074 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:32.078 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-10T09:48:32.150 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":11,"stamp":"2026-03-10T09:48:31.171628+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48544,"kb_used_data":4448,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690848,"statfs":{"total":171765137408,"available":171715428352,"internally_reserved":0,"allocated":4554752,"data_stored":2575678,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"
num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"12.003560"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-10T09:48:15.279913+0000","last_change":"2026-03-10T09:48:09.005921+0000","last_active":"2026-03-10T09:48:15.279913+0000","last_peered":"2026-03-10T09:48:15.279913+0000","last_clean":"2026-03-10T09:48:15.279913+0000","last_became_active":"2026-03-10T09:48:08.699827+0000","last_became_peered":"2026-03-10T09:48:08.699827+0000","last_unstale":"2026-03-10T09:48:15.279913+0000","last_undegraded":"2026-03-10T09:48:15.279913+0000","last_fullsized":"2026-03-10T09:48:15.279913+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":16,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T09:47:26.316001+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T09:47:26.316001+0000","last_clean_scrub_stamp":"2026-03-10T09:47:26.316001+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T20:24:12.171291+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691845,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6056,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961368,"statfs":{"total":21470642176,"available":21464440832,"internally_reserved":0,"allocated":819200,"data_stored":570531,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42299999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.627}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46000000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61499999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65100000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.40799999999999997}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73499999999999999}]}]},{"osd":6,"up_from":35,"seq":150323855367,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6056,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961368,"statfs":{"total":21470642176,"available":21464440832,"internally_reserved":0,"allocated":819200,"data_stored":570531,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78300000000000003}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74299999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81200000000000006}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70999999999999996}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94899999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84599999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82399999999999995}]}]},{"osd":1,"up_from":11,"seq":47244640271,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6236,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961188,"statfs":{"total":21470642176,"available":21464256512,"internally_reserved":0,"allocated":413696,"data_stored":172376,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 09:48:17 2026","interfaces":[{"interface":"back","average":{"1min":0.68999999999999995,"5min":0.68999999999999995,"15min":0.68999999999999995},"min":{"1min":0.216,"5min":0.216,"15min":0.216},"max":{"1min":3.3370000000000002,"5min":3.3370000000000002,"15min":3.3370000000000002},"last":0.63600000000000001},{"interface":"front","average":{"1min":0.69599999999999995,"5min":0.69599999999999995,"15min":0.69599999999999995},"min":{"1min":0.217,"5min":0.217,"15min":0.217},"max":{"1min":3.298,"5min":3.298,"15min":3.298},"last":0.498}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64700000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":2.794}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67500000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71999999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78400000000000003}]}]},{"osd":0,"up_from":8,"seq":34359738385,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6700,"kb_used_data":804,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960724,"statfs":{"total":21470642176,"available":21463781376,"internally_reserved":0,"allocated":823296,"data_stored":570846,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Tue Mar 10 09:48:21 2026","interfaces":[{"interface":"back","average":{"1min":0.56499999999999995,"5min":0.56499999999999995,"15min":0.56499999999999995},"min":{"1min":0.25900000000000001,"5min":0.25900000000000001,"15min":0.25900000000000001},"max":{"1min":1.119,"5min":1.119,"15min":1.119},"last":0.59999999999999998},{"interface":"front","average":{"1min":0.42599999999999999,"5min":0.42599999999999999,"15min":0.42599999999999999},"min":{"1min":0.20399999999999999,"5min":0.20399999999999999,"15min":0.20399999999999999},"max":{"1min":1.157,"5min":1.157,"15min":1.157},"last":0.249}]},{"osd":2,"last update":"Tue Mar 10 09:48:27 2026","interfaces":[{"interface":"back","average":{"1min":0.67700000000000005,"5min":0.67700000000000005,"15min":0.67700000000000005},"min":{"1min":0.34399999999999997,"5min":0.34399999999999997,"15min":0.34399999999999997},"max":{"1min":1.3360000000000001,"5min":1.3360000000000001,"15min":1.3360000000000001},"last":1.3360000000000001},{"interface":"front","average":{"1min":0.496,"5min":0.496,"15min":0.496},"min":{"1min":0.23699999999999999,"5min":0.23699999999999999,"15min":0.23699999999999999},"max":{"1min":1.3120000000000001,"5min":1.3120000000000001,"15min":1.3120000000000001},"last":1.3120000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64000000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57199999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67000000000000004}]}]},{"osd":2,"up_from":15,"seq":64424509454,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6244,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961180,"statfs":{"total":21470642176,"available":21464248320,"internally_reserved":0,"allocated":421888,"data_stored":173006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.503}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56000000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60799999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48699999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58999999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63}]}]},{"osd":3,"up_from":21,"seq":90194313228,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":173006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55500000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83699999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54700000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66600000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82799999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59699999999999998}]}]},{"osd":4,"up_from":25,"seq":107374182410,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5796,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961628,"statfs":{"total":21470642176,"available":21464707072,"internally_reserved":0,"allocated":421888,"data_stored":173006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97799999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42499999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83899999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85699999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53100000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.40200000000000002}]}]},{"osd":5,"up_from":30,"seq":128849018889,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5724,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961700,"statfs":{"total":21470642176,"available":21464780800,"internally_reserved":0,"allocated":413696,"data_stored":172376,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46999999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57299999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76900000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54400000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72299999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74299999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51900000000000002}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T09:48:32.151 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph pg dump --format=json 2026-03-10T09:48:32.208 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 systemd[1]: Starting Ceph 
prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:48:32.338 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 podman[65199]: 2026-03-10 09:48:32.208031398 +0000 UTC m=+0.019004409 container create 650ef4d78b8af2da3f02182c78e5eb67f20fac4d43d18abba9b2fd177fbe44ae (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 podman[65199]: 2026-03-10 09:48:32.240679362 +0000 UTC m=+0.051652373 container init 650ef4d78b8af2da3f02182c78e5eb67f20fac4d43d18abba9b2fd177fbe44ae (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 podman[65199]: 2026-03-10 09:48:32.243578308 +0000 UTC m=+0.054551319 container start 650ef4d78b8af2da3f02182c78e5eb67f20fac4d43d18abba9b2fd177fbe44ae (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 bash[65199]: 650ef4d78b8af2da3f02182c78e5eb67f20fac4d43d18abba9b2fd177fbe44ae 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 podman[65199]: 2026-03-10 09:48:32.200249064 +0000 UTC m=+0.011222084 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 systemd[1]: Started Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.282Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.282Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.282Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.282Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm03 (none))" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.282Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.282Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.288Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.288Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.293Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." 
http2=false 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.823µs 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=19.817µs wal_replay_duration=92.343µs total_replay_duration=128.561µs 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=main.go:947 level=info msg="TSDB started" 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.294Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.304Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=10.089704ms db_storage=641ns remote_storage=1.503µs web_handler=291ns query_engine=651ns scrape=1.002888ms scrape_sd=19.417µs notify=15.81µs notify_sd=7.935µs rules=8.829285ms 2026-03-10T09:48:32.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:48:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:48:32.304Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 
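The journal lines above show prometheus.a finishing startup on vm03 and reporting ready on :9095 (cephadm places it on 9095 here rather than Prometheus's stock 9090). A minimal sketch of how a harness could confirm the daemon is actually serving, using Prometheus's standard /-/ready management endpoint; the host and port are taken from this log, the rest is illustrative and not part of this suite:

```python
# Sketch only (not part of this suite): poll the Prometheus readiness
# endpoint until the daemon started in the journal above answers.
# vm03.local:9095 comes from the log; /-/ready is the standard
# Prometheus management endpoint.
import time
import urllib.request

def wait_for_prometheus(url="http://vm03.local:9095/-/ready", timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return True
        except OSError:
            pass  # daemon not answering yet; retry
        time.sleep(2)
    return False
```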
2026-03-10T09:48:32.799 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T09:48:32.802 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-10T09:48:32.863 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":11,"stamp":"2026-03-10T09:48:31.171628+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48544,"kb_used_data":4448,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690848,"statfs":{"total":171765137408,"available":171715428352,"internally_reserved":0,"allocated":4554752,"data_stored":2575678,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"12.003560"},"pg_stats":[{"pgid":"1.0","version":"43'87","reporte
d_seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-10T09:48:15.279913+0000","last_change":"2026-03-10T09:48:09.005921+0000","last_active":"2026-03-10T09:48:15.279913+0000","last_peered":"2026-03-10T09:48:15.279913+0000","last_clean":"2026-03-10T09:48:15.279913+0000","last_became_active":"2026-03-10T09:48:08.699827+0000","last_became_peered":"2026-03-10T09:48:08.699827+0000","last_unstale":"2026-03-10T09:48:15.279913+0000","last_undegraded":"2026-03-10T09:48:15.279913+0000","last_fullsized":"2026-03-10T09:48:15.279913+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":16,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T09:47:26.316001+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T09:47:26.316001+0000","last_clean_scrub_stamp":"2026-03-10T09:47:26.316001+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T20:24:12.171291+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata
":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691845,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6056,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961368,"statfs":{"total":21470642176,"available":21464440832,"internally_reserved":0,"allocated":819200,"data_stored":570531,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42299999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.627}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46000000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61499999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65100000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.40799999999999997}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73499999999999999}]}]},{"osd":6,"up_from":35,"seq":150323855367,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6056,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961368,"statfs":{"total":21470642176,"available":21464440832,"internally_reserved":0,"allocated":819200,"data_stored":570531,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78300000000000003}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74299999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81200000000000006}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70999999999999996}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94899999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84599999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82399999999999995}]}]},{"osd":1,"up_from":11,"seq":47244640271,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6236,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961188,"statfs":{"total":21470642176,"available":21464256512,"internally_reserved":0,"allocated":413696,"data_stored":172376,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 09:48:17 2026","interfaces":[{"interface":"back","average":{"1min":0.68999999999999995,"5min":0.68999999999999995,"15min":0.68999999999999995},"min":{"1min":0.216,"5min":0.216,"15min":0.216},"max":{"1min":3.3370000000000002,"5min":3.3370000000000002,"15min":3.3370000000000002},"last":0.63600000000000001},{"interface":"front","average":{"1min":0.69599999999999995,"5min":0.69599999999999995,"15min":0.69599999999999995},"min":{"1min":0.217,"5min":0.217,"15min":0.217},"max":{"1min":3.298,"5min":3.298,"15min":3.298},"last":0.498}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64700000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":2.794}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67500000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71999999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78400000000000003}]}]},{"osd":0,"up_from":8,"seq":34359738385,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6700,"kb_used_data":804,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960724,"statfs":{"total":21470642176,"available":21463781376,"internally_reserved":0,"allocated":823296,"data_stored":570846,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Tue Mar 10 09:48:21 2026","interfaces":[{"interface":"back","average":{"1min":0.56499999999999995,"5min":0.56499999999999995,"15min":0.56499999999999995},"min":{"1min":0.25900000000000001,"5min":0.25900000000000001,"15min":0.25900000000000001},"max":{"1min":1.119,"5min":1.119,"15min":1.119},"last":0.59999999999999998},{"interface":"front","average":{"1min":0.42599999999999999,"5min":0.42599999999999999,"15min":0.42599999999999999},"min":{"1min":0.20399999999999999,"5min":0.20399999999999999,"15min":0.20399999999999999},"max":{"1min":1.157,"5min":1.157,"15min":1.157},"last":0.249}]},{"osd":2,"last update":"Tue Mar 10 09:48:27 2026","interfaces":[{"interface":"back","average":{"1min":0.67700000000000005,"5min":0.67700000000000005,"15min":0.67700000000000005},"min":{"1min":0.34399999999999997,"5min":0.34399999999999997,"15min":0.34399999999999997},"max":{"1min":1.3360000000000001,"5min":1.3360000000000001,"15min":1.3360000000000001},"last":1.3360000000000001},{"interface":"front","average":{"1min":0.496,"5min":0.496,"15min":0.496},"min":{"1min":0.23699999999999999,"5min":0.23699999999999999,"15min":0.23699999999999999},"max":{"1min":1.3120000000000001,"5min":1.3120000000000001,"15min":1.3120000000000001},"last":1.3120000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64000000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57199999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67000000000000004}]}]},{"osd":2,"up_from":15,"seq":64424509454,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6244,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961180,"statfs":{"total":21470642176,"available":21464248320,"internally_reserved":0,"allocated":421888,"data_stored":173006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63700000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.503}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56000000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60799999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48699999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58999999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63}]}]},{"osd":3,"up_from":21,"seq":90194313228,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":173006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55500000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83699999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54700000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69299999999999995}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66600000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82799999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59699999999999998}]}]},{"osd":4,"up_from":25,"seq":107374182410,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5796,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961628,"statfs":{"total":21470642176,"available":21464707072,"internally_reserved":0,"allocated":421888,"data_stored":173006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97799999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42499999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.83899999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85699999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53100000000000003}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.40200000000000002}]}]},{"osd":5,"up_from":30,"seq":128849018889,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5724,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961700,"statfs":{"total":21470642176,"available":21464780800,"internally_reserved":0,"allocated":413696,"data_stored":172376,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46999999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57299999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76900000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54400000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72299999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.74299999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51900000000000002}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T09:48:32.864 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-10T09:48:32.864 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-03-10T09:48:32.864 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T09:48:32.864 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph health --format=json 2026-03-10T09:48:32.898 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[52384]: pgmap v11: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:32.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:32.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:32.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:32.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:32.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 systemd[1]: Stopping Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 bash[74172]: Error: no container with name or ID "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager.a" found: no such container 2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[70832]: level=info ts=2026-03-10T09:48:32.851Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 
2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 podman[74183]: 2026-03-10 09:48:32.865021626 +0000 UTC m=+0.033777249 container died 60ecdf0d00059ab92a78e10fbfdf6b1d1c20d9c29b496ca8419fe4427c6ab832 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 podman[74183]: 2026-03-10 09:48:32.882307414 +0000 UTC m=+0.051063027 container remove 60ecdf0d00059ab92a78e10fbfdf6b1d1c20d9c29b496ca8419fe4427c6ab832 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 podman[74183]: 2026-03-10 09:48:32.883561993 +0000 UTC m=+0.052317606 volume remove e30fdf39cbc22ca72af11cc90f2a26524546b8c862d90b968d2359962c2f7636 2026-03-10T09:48:32.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 bash[74183]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a 2026-03-10T09:48:32.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[56720]: pgmap v11: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:32.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:32.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:32.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:32.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:32.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:32 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:32.908 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:32 vm03 ceph-mon[50536]: pgmap v11: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:48:32.908 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:32 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:32.908 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:32 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:48:32.908 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:32 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:32.908 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:32 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:32.908 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:32 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 
2026-03-10T09:48:33.040 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/mon.a/config 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 bash[74231]: Error: no container with name or ID "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager.a" found: no such container 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@alertmanager.a.service: Deactivated successfully. 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 systemd[1]: Stopped Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:32 vm00 systemd[1]: Starting Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 podman[74287]: 2026-03-10 09:48:33.059381465 +0000 UTC m=+0.027340410 volume create adfa6748053afa4cad34c7c0259fc532b1f1cc4d04d707bf94b226bdf4cb2f1f 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 podman[74287]: 2026-03-10 09:48:33.065467809 +0000 UTC m=+0.033426754 container create 276d5952c1656724392f5ba5fd3d904afd453f7beb8ac9f6753903255d8d3248 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 podman[74287]: 2026-03-10 09:48:33.10228106 +0000 UTC m=+0.070240006 container init 276d5952c1656724392f5ba5fd3d904afd453f7beb8ac9f6753903255d8d3248 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 podman[74287]: 2026-03-10 09:48:33.108209809 +0000 UTC m=+0.076168754 container start 276d5952c1656724392f5ba5fd3d904afd453f7beb8ac9f6753903255d8d3248 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 bash[74287]: 276d5952c1656724392f5ba5fd3d904afd453f7beb8ac9f6753903255d8d3248 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 podman[74287]: 2026-03-10 09:48:33.049589987 +0000 UTC m=+0.017548941 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 systemd[1]: Started Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
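The alertmanager.a journal above shows cephadm's reconfigure cycle: systemd stops the unit, podman removes the dead container and its volume, and a fresh container is created and started from the same quay.io/prometheus/alertmanager:v0.23.0 image. An illustrative way to confirm the recreated container is up; the container name follows the cephadm convention visible in the log, while the podman invocation and its JSON field names are assumptions, not something this run executes:

```python
# Illustrative only: check that the recreated alertmanager container from
# the journal above is running. The name follows the cephadm convention
# seen in the log; field names assume podman's `ps --format json` output.
import json
import subprocess

FSID = "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"

def alertmanager_running() -> bool:
    name = f"ceph-{FSID}-alertmanager-a"
    proc = subprocess.run(
        ["sudo", "podman", "ps", "--filter", f"name={name}",
         "--format", "json"],
        capture_output=True, text=True, check=True)
    containers = json.loads(proc.stdout) if proc.stdout.strip() else []
    return any(c.get("State") == "running" for c in containers)
```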
2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.145Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)"
2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.145Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)"
2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.146Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.100 port=9094
2026-03-10T09:48:33.169 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.147Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." interval=2s
2026-03-10T09:48:33.536 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:33] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:33.536 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.203Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml
2026-03-10T09:48:33.536 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.204Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml
2026-03-10T09:48:33.536 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.214Z caller=main.go:518 msg=Listening address=:9093
2026-03-10T09:48:33.536 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:33.214Z caller=tls_config.go:191 msg="TLS is disabled." http2=false
2026-03-10T09:48:33.536 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T09:48:33.536 INFO:teuthology.orchestra.run.vm00.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]}
2026-03-10T09:48:33.585 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done
2026-03-10T09:48:33.585 INFO:tasks.cephadm:Setup complete, yielding
2026-03-10T09:48:33.585 INFO:teuthology.run_tasks:Running task cephadm.shell...
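teuthology's wait_until_healthy polls the cluster until the JSON health status reads HEALTH_OK, which is exactly what the {"status":"HEALTH_OK","checks":{},"mutes":[]} line above reports (the mons show the matching {"prefix": "health", "format": "json"} dispatches). A rough shell equivalent of that poll, assuming jq is available (teuthology itself does this in Python):

    # poll until the cluster reports HEALTH_OK
    until [ "$(ceph health --format json | jq -r .status)" = HEALTH_OK ]; do
        sleep 5
    done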
2026-03-10T09:48:33.588 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local
2026-03-10T09:48:33.588 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- bash -c 'ceph config set mgr mgr/cephadm/use_repo_digest false --force'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='client.24451 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: Reconfiguring alertmanager.a (dependencies changed)...
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: Reconfiguring daemon alertmanager.a on vm00
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='client.14535 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.103:9095"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2167311888' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch
2026-03-10T09:48:33.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='client.24451 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: Reconfiguring alertmanager.a (dependencies changed)...
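The cephadm.shell task above runs its command inside a disposable container via `cephadm shell`, here disabling mgr/cephadm/use_repo_digest so the upgrade test keeps referring to images by tag instead of resolving them to repo digests. A quick way to confirm the setting took effect (standard ceph CLI):

    ceph config get mgr mgr/cephadm/use_repo_digest   # expect: false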
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: Reconfiguring daemon alertmanager.a on vm00
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='client.14535 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.103:9095"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:33.793 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:33 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2167311888' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='client.24451 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: Reconfiguring alertmanager.a (dependencies changed)...
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: Reconfiguring daemon alertmanager.a on vm00
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='client.14535 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:48:34.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.103:9095"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:34.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:33 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2167311888' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch
2026-03-10T09:48:34.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:33] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:34.194 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-10T09:48:34.196 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local
2026-03-10T09:48:34.196 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin realm create --rgw-realm=r --default'
2026-03-10T09:48:34.417 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:34 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:34] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: pgmap v12: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.103:9095"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2571921333' entity='client.admin'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: pgmap v12: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.103:9095"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2571921333' entity='client.admin'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:34 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: pgmap v12: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.103:9095"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2571921333' entity='client.admin'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:34 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:35.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:35 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:35.148Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000857217s
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[52384]: Deploying daemon grafana.a on vm03
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[52384]: osdmap e44: 8 total, 8 up, 8 in
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1612674096' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[56720]: Deploying daemon grafana.a on vm03
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[56720]: osdmap e44: 8 total, 8 up, 8 in
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1612674096' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
2026-03-10T09:48:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:35 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:36.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:35 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:48:36.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:35 vm03 ceph-mon[50536]: Deploying daemon grafana.a on vm03
2026-03-10T09:48:36.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:35 vm03 ceph-mon[50536]: osdmap e44: 8 total, 8 up, 8 in
2026-03-10T09:48:36.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:35 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1612674096' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
2026-03-10T09:48:36.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:35 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:36.508 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:48:36.508 INFO:teuthology.orchestra.run.vm00.stdout: "id": "48ccffad-b8a6-4fd9-b4c3-9ce0f0914afe",
2026-03-10T09:48:36.508 INFO:teuthology.orchestra.run.vm00.stdout: "name": "r",
2026-03-10T09:48:36.508 INFO:teuthology.orchestra.run.vm00.stdout: "current_period": "d0332a7d-3c2d-4759-a723-7cba15900ab5",
2026-03-10T09:48:36.508 INFO:teuthology.orchestra.run.vm00.stdout: "epoch": 1
2026-03-10T09:48:36.508 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:48:36.567 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zonegroup create --rgw-zonegroup=default --master --default'
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "id": "a660c581-1549-45a7-a0e3-b95fddf915aa",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "name": "default",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "api_name": "default",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "is_master": "true",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "endpoints": [],
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames": [],
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames_s3website": [],
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "master_zone": "",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "zones": [],
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "placement_targets": [],
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "default_placement": "",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "48ccffad-b8a6-4fd9-b4c3-9ce0f0914afe",
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "sync_policy": {
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: "groups": []
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:48:36.925 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:48:36.961 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default'
2026-03-10T09:48:37.110 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:36 vm00 ceph-mon[52384]: pgmap v13: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:37.110 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:36 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1612674096' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-03-10T09:48:37.110 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:36 vm00 ceph-mon[52384]: osdmap e45: 8 total, 8 up, 8 in
2026-03-10T09:48:37.110 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:36 vm00 ceph-mon[56720]: pgmap v13: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:37.110 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:36 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1612674096' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-03-10T09:48:37.110 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:36 vm00 ceph-mon[56720]: osdmap e45: 8 total, 8 up, 8 in
2026-03-10T09:48:37.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:36 vm03 ceph-mon[50536]: pgmap v13: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:37.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:36 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1612674096' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-03-10T09:48:37.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:36 vm03 ceph-mon[50536]: osdmap e45: 8 total, 8 up, 8 in
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "id": "78c5734e-9435-47a7-ad4b-7b175d4d1d90",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "name": "z",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "domain_root": "z.rgw.meta:root",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "control_pool": "z.rgw.control",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "gc_pool": "z.rgw.log:gc",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "lc_pool": "z.rgw.log:lc",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "log_pool": "z.rgw.log",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "intent_log_pool": "z.rgw.log:intent",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "usage_log_pool": "z.rgw.log:usage",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "roles_pool": "z.rgw.meta:roles",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "reshard_pool": "z.rgw.log:reshard",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "user_keys_pool": "z.rgw.meta:users.keys",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "user_email_pool": "z.rgw.meta:users.email",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "user_swift_pool": "z.rgw.meta:users.swift",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "user_uid_pool": "z.rgw.meta:users.uid",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "otp_pool": "z.rgw.otp",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "system_key": {
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "access_key": "",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "secret_key": ""
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "placement_pools": [
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: {
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "key": "default-placement",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "val": {
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "index_pool": "z.rgw.buckets.index",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "storage_classes": {
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "STANDARD": {
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "data_pool": "z.rgw.buckets.data"
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "data_extra_pool": "z.rgw.buckets.non-ec",
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "index_type": 0
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:48:37.384 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "48ccffad-b8a6-4fd9-b4c3-9ce0f0914afe",
2026-03-10T09:48:37.385 INFO:teuthology.orchestra.run.vm00.stdout: "notif_pool": "z.rgw.log:notif"
2026-03-10T09:48:37.385 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:48:37.418 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin period update --rgw-realm=r --commit'
2026-03-10T09:48:39.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:38 vm03 ceph-mon[50536]: pgmap v16: 33 pgs: 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:39.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:38 vm03 ceph-mon[50536]: osdmap e46: 8 total, 8 up, 8 in
2026-03-10T09:48:39.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:38 vm00 ceph-mon[52384]: pgmap v16: 33 pgs: 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:39.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:38 vm00 ceph-mon[52384]: osdmap e46: 8 total, 8 up, 8 in
2026-03-10T09:48:39.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:38 vm00 ceph-mon[56720]: pgmap v16: 33 pgs: 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:39.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:38 vm00 ceph-mon[56720]: osdmap e46: 8 total, 8 up, 8 in
2026-03-10T09:48:39.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:39 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:48:39] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:48:40.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:39 vm00 ceph-mon[52384]: osdmap e47: 8 total, 8 up, 8 in
2026-03-10T09:48:40.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:39 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1324917936' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch
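The realm, zonegroup, and zone creation plus `radosgw-admin period update --commit` above is the minimal RGW multisite bootstrap; the new zone configuration takes effect only once the period is committed. A hedged way to confirm the committed layout from the same shell (both subcommands are standard radosgw-admin):

    radosgw-admin period get --rgw-realm=r   # committed period, should reference realm r
    radosgw-admin zone list                  # should include zone "z"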
2026-03-10T09:48:40.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:39 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch
2026-03-10T09:48:40.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:39 vm00 ceph-mon[56720]: osdmap e47: 8 total, 8 up, 8 in
2026-03-10T09:48:40.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:39 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1324917936' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch
2026-03-10T09:48:40.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:39 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch
2026-03-10T09:48:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:39 vm03 ceph-mon[50536]: osdmap e47: 8 total, 8 up, 8 in
2026-03-10T09:48:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:39 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1324917936' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch
2026-03-10T09:48:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:39 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[52384]: pgmap v19: 65 pgs: 32 unknown, 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[52384]: osdmap e48: 8 total, 8 up, 8 in
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[52384]: osdmap e49: 8 total, 8 up, 8 in
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1324917936' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[56720]: pgmap v19: 65 pgs: 32 unknown, 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[56720]: osdmap e48: 8 total, 8 up, 8 in
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[56720]: osdmap e49: 8 total, 8 up, 8 in
2026-03-10T09:48:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:40 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1324917936' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch
2026-03-10T09:48:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:40 vm03 ceph-mon[50536]: pgmap v19: 65 pgs: 32 unknown, 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:40 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished
2026-03-10T09:48:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:40 vm03 ceph-mon[50536]: osdmap e48: 8 total, 8 up, 8 in
2026-03-10T09:48:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:40 vm03 ceph-mon[50536]: osdmap e49: 8 total, 8 up, 8 in
2026-03-10T09:48:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:40 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1324917936' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch
2026-03-10T09:48:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:41 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch
2026-03-10T09:48:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:41 vm03 ceph-mon[50536]: pgmap v22: 97 pgs: 64 unknown, 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:42.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:41 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch
2026-03-10T09:48:42.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:41 vm00 ceph-mon[52384]: pgmap v22: 97 pgs: 64 unknown, 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:42.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:41 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch
2026-03-10T09:48:42.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:41 vm00 ceph-mon[56720]: pgmap v22: 97 pgs: 64 unknown, 32 creating+peering, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[56720]: osdmap e50: 8 total, 8 up, 8 in
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[56720]: osdmap e51: 8 total, 8 up, 8 in
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[52384]: osdmap e50: 8 total, 8 up, 8 in
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[52384]: osdmap e51: 8 total, 8 up, 8 in
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:42 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T09:48:43.151 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:48:43] "GET /metrics HTTP/1.1" 200 191129 "" "Prometheus/2.33.4"
2026-03-10T09:48:43.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:42 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished
2026-03-10T09:48:43.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:42 vm03 ceph-mon[50536]: osdmap e50: 8 total, 8 up, 8 in
2026-03-10T09:48:43.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:42 vm03 ceph-mon[50536]: osdmap e51: 8 total, 8 up, 8 in
2026-03-10T09:48:43.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:42 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T09:48:43.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:42 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T09:48:43.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:48:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:48:43.151Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.003991735s
2026-03-10T09:48:45.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:44 vm00 ceph-mon[52384]: pgmap v25: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 4.0 KiB/s wr, 12 op/s
2026-03-10T09:48:45.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:44 vm00 ceph-mon[56720]: pgmap v25: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 4.0 KiB/s wr, 12 op/s
2026-03-10T09:48:45.141 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:44 vm03 ceph-mon[50536]: pgmap v25: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 4.0 KiB/s wr, 12 op/s
2026-03-10T09:48:45.548 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 systemd[1]: Starting Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 podman[66043]: 2026-03-10 09:48:45.56061 +0000 UTC m=+0.099995772 container create 738bbf68f8eed9c4cabf15e79c12cfd8cde267dbf7502496a8cf38a6f2322b3e (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, name=ubi8, vendor=Red Hat, Inc., summary=Grafana Container configured for Ceph mgr/dashboard integration, release=236.1648460182, maintainer=Paul Cuzner , io.openshift.tags=base rhel8, io.buildah.version=1.24.2, distribution-scope=public, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, build-date=2022-03-28T10:36:18.413762, description=Ceph Grafana Container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.openshift.expose-services=, com.redhat.component=ubi8-container, version=8.5, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
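The wall of key=value pairs on the grafana container create is podman echoing every label baked into the ceph-grafana UBI8 image (vendor, license, build host, and so on). To read them without the journal noise, one could inspect the container directly on vm03; a sketch, assuming the container name from this run:

    podman inspect ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a \
        --format '{{ json .Config.Labels }}'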
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 podman[66043]: 2026-03-10 09:48:45.470219556 +0000 UTC m=+0.009605338 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 podman[66043]: 2026-03-10 09:48:45.595485624 +0000 UTC m=+0.134871406 container init 738bbf68f8eed9c4cabf15e79c12cfd8cde267dbf7502496a8cf38a6f2322b3e (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, io.buildah.version=1.24.2, description=Ceph Grafana Container, vendor=Red Hat, Inc., architecture=x86_64, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, com.redhat.component=ubi8-container, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, name=ubi8, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Paul Cuzner , summary=Grafana Container configured for Ceph mgr/dashboard integration, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.tags=base rhel8, version=8.5, build-date=2022-03-28T10:36:18.413762, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56)
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 podman[66043]: 2026-03-10 09:48:45.598027991 +0000 UTC m=+0.137413773 container start 738bbf68f8eed9c4cabf15e79c12cfd8cde267dbf7502496a8cf38a6f2322b3e (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, description=Ceph Grafana Container, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.openshift.tags=base rhel8, distribution-scope=public, build-date=2022-03-28T10:36:18.413762, maintainer=Paul Cuzner , summary=Grafana Container configured for Ceph mgr/dashboard integration, name=ubi8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, version=8.5, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, vcs-type=git, io.openshift.expose-services=, com.redhat.component=ubi8-container, io.buildah.version=1.24.2)
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 bash[66043]: 738bbf68f8eed9c4cabf15e79c12cfd8cde267dbf7502496a8cf38a6f2322b3e
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 systemd[1]: Started Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." logger=settings
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="App mode production" logger=settings
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r-----
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Starting DB migrations" logger=migrator
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create migration_log table"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user table"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.login"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.email"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_login - v1"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_email - v1"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table user to user_v1 - v1"
2026-03-10T09:48:45.811 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user table v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_login - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_email - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table user_v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column help_flags1 to user table" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update user table charset" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add last_seen_at column to user" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add missing user data" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_disabled column to user" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index user.login/user.email" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_service_account column to user" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create temp user table v1-7" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index 
IDX_temp_user_email - v1-7" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v1-7" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v1-7" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v1-7" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update temp_user table charset" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_email - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_org_id - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_code - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_status - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table temp_user to temp_user_tmp_qwerty - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create temp_user v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v2" 2026-03-10T09:48:45.812 
INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy temp_user v1 to v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop temp_user_tmp_qwerty" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Set created for temp users that will otherwise prematurely expire" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create star table" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index star.user_id_dashboard_id" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create org table v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_name - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create org_user table v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_org_id - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_user_org_id_user_id - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_user_id - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update org table charset" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update org_user table charset" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate all Read Only Viewers to Viewers" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard table" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard.account_id" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_account_id_slug" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_tag table" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_tag.dasboard_id_term" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_dashboard_tag_dashboard_id_term - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" 2026-03-10T09:48:45.812 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: 
t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column 
has_acl in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index dashboard_org_id_uid" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_slug" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning v1 to v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop 
dashboard_provisioning_tmp_qwerty" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted dashboards" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" 2026-03-10T09:48:45.813 
INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json data column" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update initial version to 1" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" 2026-03-10T09:48:45.813 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index datasource_org_id_uid" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing 
migration" logger=migrator id="add unique index datasource_org_id_is_default" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.key" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" 2026-03-10T09:48:45.814 
INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" 2026-03-10T09:48:45.814 
INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist table v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" 2026-03-10T09:48:45.814 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.103:3000"}]: dispatch 2026-03-10T09:48:45.815 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:45 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index notification_journal org_id & alert_id & notifier_id" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification_state table v1" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_notification_state_alert_id" 2026-03-10T09:48:45.815 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old annotation table v4" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column region_id to annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" 
logger=migrator id="Add column tags to annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v2" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: 
t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Make epoch_end the same as epoch" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Move region to single row" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Set dashboard version to 1 where 0" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_version.data to mediumtext v1" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external to team_member table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" 2026-03-10T09:48:45.816 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="save default acl rules in dashboard_acl table" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_login_attempt_username - v1" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" 2026-03-10T09:48:45.817 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.103:3000"}]: dispatch 2026-03-10T09:48:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1205114833' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.103:3000"}]: dispatch 2026-03-10T09:48:46.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:45 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" 2026-03-10T09:48:46.292 
INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" 2026-03-10T09:48:46.292 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user auth token" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and uid columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" 2026-03-10T09:48:46.293 
INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on alert_instance" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" 
2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title columns" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" 2026-03-10T09:48:46.293 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in alert_configuration" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id=create_ngalert_configuration_table 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index 
library_element org_id_uid" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create team role table" 2026-03-10T09:48:46.294 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_role_org_id_team_id_role_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.name" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator 
id="add index builtin_role.org_id" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_role_name" 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=256.343553ms 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=warn msg="[Deprecated] the datasource provisioning config is outdated. 
please upgrade" logger=provisioning.datasources filename=/etc/grafana/provisioning/datasources/ceph-dashboard.yml 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-10T09:48:46.295 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:48:45 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:48:45+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T09:48:46.443 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "id": "24786e02-fecc-48db-8a0f-c25d6faec021", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "epoch": 1, 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "predecessor_uuid": "d0332a7d-3c2d-4759-a723-7cba15900ab5", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "sync_status": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "period_map": { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "id": "24786e02-fecc-48db-8a0f-c25d6faec021", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "zonegroups": [ 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "id": "a660c581-1549-45a7-a0e3-b95fddf915aa", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "name": "default", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "api_name": "default", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "is_master": "true", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "endpoints": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames_s3website": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "master_zone": "78c5734e-9435-47a7-ad4b-7b175d4d1d90", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "zones": [ 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "id": "78c5734e-9435-47a7-ad4b-7b175d4d1d90", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "name": "z", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "endpoints": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "log_meta": "false", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "log_data": "false", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "bucket_index_max_shards": 11, 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "read_only": "false", 
2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "tier_type": "", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "sync_from_all": "true", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "sync_from": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "redirect_zone": "" 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "placement_targets": [ 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "name": "default-placement", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "tags": [], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "storage_classes": [ 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "STANDARD" 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "default_placement": "default-placement", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "48ccffad-b8a6-4fd9-b4c3-9ce0f0914afe", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "sync_policy": { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "groups": [] 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "short_zone_ids": [ 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "key": "78c5734e-9435-47a7-ad4b-7b175d4d1d90", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "val": 2619121602 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "master_zonegroup": "a660c581-1549-45a7-a0e3-b95fddf915aa", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "master_zone": "78c5734e-9435-47a7-ad4b-7b175d4d1d90", 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "period_config": { 2026-03-10T09:48:46.444 INFO:teuthology.orchestra.run.vm00.stdout: "bucket_quota": { 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "check_on_raw": false, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_size": -1, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_size_kb": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_objects": -1 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "user_quota": { 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "check_on_raw": false, 
2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_size": -1, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_size_kb": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_objects": -1 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "user_ratelimit": { 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_ops": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_ops": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_bytes": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_bytes": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "bucket_ratelimit": { 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_ops": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_ops": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_bytes": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_bytes": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "anonymous_ratelimit": { 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_ops": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_ops": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_bytes": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_bytes": 0, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "48ccffad-b8a6-4fd9-b4c3-9ce0f0914afe", 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "realm_name": "r", 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout: "realm_epoch": 2 2026-03-10T09:48:46.445 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:48:46.481 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000' 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: pgmap v27: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 3.7 KiB/s wr, 11 op/s 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.103:3000"}]: dispatch 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T09:48:46.697 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[52384]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: pgmap v27: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 3.7 KiB/s wr, 11 op/s 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.103:3000"}]: dispatch 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T09:48:46.700 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:46 vm00 ceph-mon[56720]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: pgmap v27: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 3.7 KiB/s wr, 11 op/s 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.103:3000"}]: dispatch 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T09:48:46.912 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:46 vm03 ceph-mon[50536]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T09:48:46.962 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled rgw.foo update... 2026-03-10T09:48:47.031 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw smpl' 2026-03-10T09:48:47.513 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:47] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:47.620 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled rgw.smpl update... 
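Both rgw specs are now recorded: rgw.foo pinned to realm r / zone z on port 8000, and rgw.smpl left at the defaults, each saved with placement count:2 per the mon audit entries above. Once the mgr converges, one might confirm what the orchestrator actually scheduled with something like this (a sketch, run through the same cephadm shell the test drives):

  # dump the saved service specs back out as YAML
  ceph orch ls rgw --export
  # per-daemon placement; two rgw.foo and two rgw.smpl daemons are expected
  ceph orch ps --daemon-type rgw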
2026-03-10T09:48:47.691 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph osd pool create foo' 2026-03-10T09:48:47.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:47] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:48.239 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:48.239 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: Saving service rgw.foo spec with placement count:2 2026-03-10T09:48:48.239 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: pgmap v30: 129 pgs: 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 1 op/s 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='client.24461 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: Saving service rgw.smpl spec with placement count:2 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: Saving service rgw.foo spec with placement count:2 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": 
["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: Saving service rgw.foo spec with placement count:2 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: pgmap v30: 129 pgs: 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 1 op/s 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='client.24461 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: Saving service rgw.smpl spec with placement count:2 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: Saving service rgw.foo spec with placement count:2 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": 
["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:48.240 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.241 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:47 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: Saving service rgw.foo spec with placement count:2 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: pgmap v30: 129 pgs: 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 1 op/s 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='client.24461 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: Saving service rgw.smpl spec with placement count:2 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: Saving service rgw.foo spec with placement count:2 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": 
["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:48.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:47 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:49.024 INFO:teuthology.orchestra.run.vm00.stderr:pool 'foo' created 2026-03-10T09:48:49.072 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:48 vm00 ceph-mon[52384]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T09:48:49.072 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:48 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2717966353' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T09:48:49.072 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:48 vm00 ceph-mon[56720]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T09:48:49.072 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:48 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2717966353' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T09:48:49.132 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'rbd pool init foo' 2026-03-10T09:48:49.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:48 vm03 ceph-mon[50536]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T09:48:49.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:48 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2717966353' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T09:48:49.584 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:49 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:49] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:49.705 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:49 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:48:49] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='client.? 
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: osdmap e55: 8 total, 8 up, 8 in
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 410 B/s wr, 1 op/s
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: Deploying daemon rgw.foo.vm03.smqfat on vm03
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1162124667' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch
2026-03-10T09:48:50.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:50 vm03 ceph-mon[50536]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch
2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='client.?
192.168.123.100:0/2717966353' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 410 B/s wr, 1 op/s 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: Deploying daemon rgw.foo.vm03.smqfat on vm03 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1162124667' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[52384]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/2717966353' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 410 B/s wr, 1 op/s 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: Deploying daemon rgw.foo.vm03.smqfat on vm03 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1162124667' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T09:48:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:50 vm00 ceph-mon[56720]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: Saving service rgw.smpl spec with placement count:2 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:51.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:51.103 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.103 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:51.103 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[52384]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: Saving service rgw.smpl spec with placement count:2 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:51.104 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:51 vm00 ceph-mon[56720]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T09:48:51.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T09:48:51.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: Saving service rgw.smpl spec with placement count:2 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:51.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:51 vm03 ceph-mon[50536]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T09:48:52.097 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T09:48:52.097 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: pgmap v35: 161 pgs: 32 unknown, 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 411 B/s wr, 1 op/s 2026-03-10T09:48:52.097 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:52.097 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:48:52.097 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T09:48:52.097 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[52384]: from='mgr.24302 
192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: osdmap e57: 8 total, 8 up, 8 in
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: pgmap v35: 161 pgs: 32 unknown, 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 411 B/s wr, 1 op/s
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:52.098 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:52 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:52.140 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply iscsi foo u p'
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: osdmap e57: 8 total, 8 up, 8 in
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: pgmap v35: 161 pgs: 32 unknown, 129 active+clean; 451 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 411 B/s wr, 1 op/s
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:52 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
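The iscsi service is applied here with the positional form (pool foo, api_user u, api_password p), which reaches the mons as the 'orch apply iscsi' dispatch and is saved with placement count:1 below. For reference, an equivalent YAML spec one could feed to 'ceph orch apply -i -' instead (a sketch; trusted_ip_list and placement are omitted and would take their defaults):

  service_type: iscsi
  service_id: foo
  spec:
    pool: foo
    api_user: u
    api_password: p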
"config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:52.736 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:52] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:53.139 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled iscsi.foo update... 2026-03-10T09:48:53.189 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:53 vm03 ceph-mon[50536]: Deploying daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T09:48:53.190 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:53 vm03 ceph-mon[50536]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T09:48:53.190 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:53 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:53.227 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 120' 2026-03-10T09:48:53.307 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:53 vm00 ceph-mon[52384]: Deploying daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T09:48:53.307 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:53 vm00 ceph-mon[52384]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T09:48:53.307 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:53 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:53.307 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:53 vm00 ceph-mon[56720]: Deploying daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T09:48:53.307 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:53 vm00 ceph-mon[56720]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T09:48:53.307 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:53 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:53.308 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:48:53] "GET /metrics HTTP/1.1" 200 196161 "" "Prometheus/2.33.4" 2026-03-10T09:48:53.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:53] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: Saving service iscsi.foo spec with placement count:1 2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 10 KiB/s wr, 167 op/s 2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 
2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: Checking dashboard <-> RGW credentials
2026-03-10T09:48:54.412 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:54 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: Saving service iscsi.foo spec with placement count:1
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 10 KiB/s wr, 167 op/s
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: Checking dashboard <-> RGW credentials
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: Saving service iscsi.foo spec with placement count:1
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 10 KiB/s wr, 167 op/s
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: Checking dashboard <-> RGW credentials
2026-03-10T09:48:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:54 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:55.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:54] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:55.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:55] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:55.647 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:55] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:55 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:55 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:56.391 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:56] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:56.818 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:56] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:56.818 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:48:56] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:48:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: pgmap v38: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 7.0 KiB/s wr, 116 op/s
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: Checking dashboard <-> RGW credentials
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: pgmap v38: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 7.0 KiB/s wr, 116 op/s
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: Checking dashboard <-> RGW credentials
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:57.081 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: pgmap v38: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 7.0 KiB/s wr, 116 op/s
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: Checking dashboard <-> RGW credentials
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:57.082 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:58.050 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: Checking pool "foo" exists for service iscsi.foo
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: Deploying daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 331 KiB/s rd, 6.6 KiB/s wr, 562 op/s
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
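The auth transaction above shows the exact capabilities cephadm mints for the gateway's CephX key before "Deploying daemon iscsi.foo.vm00.dqkdwh". A hand-rolled equivalent of the same request (the entity name is the random one generated for this run; yours will differ):

  ceph auth get-or-create client.iscsi.foo.vm00.dqkdwh \
      mon 'profile rbd, allow command "osd blocklist", allow command "config-key get" with "key" prefix "iscsi/"' \
      mgr 'allow command "service status"' \
      osd 'allow rwx'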
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: Checking pool "foo" exists for service iscsi.foo
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: Deploying daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 331 KiB/s rd, 6.6 KiB/s wr, 562 op/s
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:48:58.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:48:58.052 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.052 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.052 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:58.052 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.052 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:57 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: Checking pool "foo" exists for service iscsi.foo
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: Deploying daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 331 KiB/s rd, 6.6 KiB/s wr, 562 op/s
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
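After deploying the gateway, the mgr wires it into the dashboard: it lists the registered gateways, sets the API SSL-verification flag, and adds the new gateway under its host name. Rough manual equivalents, as a sketch (in recent releases iscsi-gateway-add reads the gateway URL from a file passed with -i; gw-url.txt and the admin:password credentials are hypothetical placeholders):

  echo 'http://admin:password@192.168.123.100:5000' > gw-url.txt
  ceph dashboard iscsi-gateway-add -i gw-url.txt vm00
  ceph dashboard set-iscsi-api-ssl-verification true
  ceph dashboard iscsi-gateway-list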
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:48:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:48:59.036 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:58 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3589376362' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T09:48:59.036 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:58 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1720324838' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1641161112"}]: dispatch 2026-03-10T09:48:59.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:58 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3589376362' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T09:48:59.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:58 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1720324838' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1641161112"}]: dispatch 2026-03-10T09:48:59.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:58 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/3589376362' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T09:48:59.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:58 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1720324838' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1641161112"}]: dispatch 2026-03-10T09:48:59.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:59 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:59] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:48:59.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:48:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:48:59] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1720324838' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1641161112"}]': finished 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: mgrmap e21: y(active, since 44s), standbys: x 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 296 KiB/s rd, 5.9 KiB/s wr, 503 op/s 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1421235657' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2246671766"}]: dispatch 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1720324838' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1641161112"}]': finished 2026-03-10T09:49:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: mgrmap e21: y(active, since 44s), standbys: x 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 296 KiB/s rd, 5.9 KiB/s wr, 503 op/s 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/1421235657' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2246671766"}]: dispatch 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:48:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:48:59 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:48:59] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1720324838' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1641161112"}]': finished 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: mgrmap e21: y(active, since 44s), standbys: x 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 296 KiB/s rd, 5.9 KiB/s wr, 503 op/s 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1421235657' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2246671766"}]: dispatch 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:48:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1421235657' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2246671766"}]': finished 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/127896401' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2733987987"}]: dispatch 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/127896401' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2733987987"}]': finished 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2657526153' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]: dispatch 2026-03-10T09:49:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:00 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]: dispatch 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1421235657' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2246671766"}]': finished 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/127896401' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2733987987"}]: dispatch 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/127896401' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2733987987"}]': finished 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='client.? 
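The burst of 'osd blocklist' traffic above appears to be the freshly started iscsi daemon using its 'allow command "osd blocklist"' cap to list and clear entries left over from its previous client addresses; each successful rm bumps the osdmap epoch (e59 onward). The same housekeeping from the CLI, as a sketch (the address is one example taken from this run):

  ceph osd blocklist ls
  ceph osd blocklist rm 192.168.123.100:0/1641161112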
2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2657526153' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]: dispatch
2026-03-10T09:49:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]: dispatch
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1421235657' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2246671766"}]': finished
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: osdmap e60: 8 total, 8 up, 8 in
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/127896401' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2733987987"}]: dispatch
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/127896401' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2733987987"}]': finished
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: osdmap e61: 8 total, 8 up, 8 in
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2657526153' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]: dispatch
2026-03-10T09:49:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:00 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]: dispatch
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[52384]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 347 KiB/s rd, 853 B/s wr, 555 op/s
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]': finished
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[52384]: osdmap e62: 8 total, 8 up, 8 in
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3147486194' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]: dispatch
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]: dispatch
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[56720]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 347 KiB/s rd, 853 B/s wr, 555 op/s
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]': finished
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[56720]: osdmap e62: 8 total, 8 up, 8 in
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3147486194' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]: dispatch
2026-03-10T09:49:02.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:02 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]: dispatch
2026-03-10T09:49:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:02 vm03 ceph-mon[50536]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 88 MiB used, 160 GiB / 160 GiB avail; 347 KiB/s rd, 853 B/s wr, 555 op/s
2026-03-10T09:49:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:02 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1578175387"}]': finished
2026-03-10T09:49:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:02 vm03 ceph-mon[50536]: osdmap e62: 8 total, 8 up, 8 in
2026-03-10T09:49:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:02 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3147486194' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]: dispatch
2026-03-10T09:49:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:02 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]: dispatch
2026-03-10T09:49:03.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:03] "GET /metrics HTTP/1.1" 200 214477 "" "Prometheus/2.33.4"
2026-03-10T09:49:03.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]': finished
2026-03-10T09:49:03.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[52384]: osdmap e63: 8 total, 8 up, 8 in
2026-03-10T09:49:03.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3243652668' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]: dispatch
2026-03-10T09:49:03.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]: dispatch
2026-03-10T09:49:03.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]': finished
2026-03-10T09:49:03.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[56720]: osdmap e63: 8 total, 8 up, 8 in
2026-03-10T09:49:03.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3243652668' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]: dispatch
2026-03-10T09:49:03.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:03 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]: dispatch
2026-03-10T09:49:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:04.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:03 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2424658185"}]': finished
2026-03-10T09:49:04.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:03 vm03 ceph-mon[50536]: osdmap e63: 8 total, 8 up, 8 in
2026-03-10T09:49:04.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:03 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3243652668' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]: dispatch
2026-03-10T09:49:04.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:03 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]: dispatch
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[52384]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 255 B/s wr, 52 op/s
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]': finished
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[52384]: osdmap e64: 8 total, 8 up, 8 in
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3977184586' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]: dispatch
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]: dispatch
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[56720]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 255 B/s wr, 52 op/s
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]': finished
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[56720]: osdmap e64: 8 total, 8 up, 8 in
2026-03-10T09:49:04.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:04 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3977184586' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]: dispatch
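The Alertmanager warnings above are a certificate problem, not a connectivity one: the webhook receiver URLs point at bare IPs (192.168.123.100 and .103 on port 8443), but the dashboard certificate carries no IP subjectAltName entries, so x509 validation fails and the notification is retried indefinitely. One way to confirm and fix it, as a sketch (assumes OpenSSL 1.1.1+ for -ext/-addext; dashboard.key/dashboard.crt are hypothetical file names):

  # inspect the SANs the dashboard is presenting
  echo | openssl s_client -connect 192.168.123.100:8443 2>/dev/null |
      openssl x509 -noout -ext subjectAltName

  # issue a cert whose SAN covers the IP, then install it and reload the module
  openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
      -keyout dashboard.key -out dashboard.crt \
      -subj '/CN=192.168.123.100' \
      -addext 'subjectAltName=IP:192.168.123.100'
  ceph dashboard set-ssl-certificate -i dashboard.crt
  ceph dashboard set-ssl-certificate-key -i dashboard.key
  ceph mgr module disable dashboard; ceph mgr module enable dashboard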
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]: dispatch 2026-03-10T09:49:05.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:04 vm03 ceph-mon[50536]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 255 B/s wr, 52 op/s 2026-03-10T09:49:05.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:04 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/254974794"}]': finished 2026-03-10T09:49:05.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:04 vm03 ceph-mon[50536]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T09:49:05.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:04 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3977184586' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]: dispatch 2026-03-10T09:49:05.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:04 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]: dispatch 2026-03-10T09:49:06.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:05 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]': finished 2026-03-10T09:49:06.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:05 vm00 ceph-mon[52384]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T09:49:06.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:05 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1328202943' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2863919683"}]: dispatch 2026-03-10T09:49:06.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:05 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]': finished 2026-03-10T09:49:06.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:05 vm00 ceph-mon[56720]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T09:49:06.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:05 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1328202943' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2863919683"}]: dispatch 2026-03-10T09:49:06.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:05 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/254974794"}]': finished 2026-03-10T09:49:06.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:05 vm03 ceph-mon[50536]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T09:49:06.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:05 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/1328202943' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2863919683"}]: dispatch 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[52384]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 255 B/s wr, 52 op/s 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1328202943' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2863919683"}]': finished 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[52384]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4057172285' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]: dispatch 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]: dispatch 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[56720]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 255 B/s wr, 52 op/s 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1328202943' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2863919683"}]': finished 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[56720]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4057172285' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]: dispatch 2026-03-10T09:49:07.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:06 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]: dispatch 2026-03-10T09:49:07.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:06 vm03 ceph-mon[50536]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 255 B/s wr, 52 op/s 2026-03-10T09:49:07.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:06 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1328202943' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2863919683"}]': finished 2026-03-10T09:49:07.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:06 vm03 ceph-mon[50536]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T09:49:07.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:06 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/4057172285' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]: dispatch 2026-03-10T09:49:07.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:06 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]: dispatch 2026-03-10T09:49:08.091 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:07 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]': finished 2026-03-10T09:49:08.091 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:07 vm00 ceph-mon[52384]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T09:49:08.091 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:07 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2652267837' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1870828082"}]: dispatch 2026-03-10T09:49:08.091 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:07 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]': finished 2026-03-10T09:49:08.091 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:07 vm00 ceph-mon[56720]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T09:49:08.091 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:07 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2652267837' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1870828082"}]: dispatch 2026-03-10T09:49:08.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:07 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3022761531"}]': finished 2026-03-10T09:49:08.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:07 vm03 ceph-mon[50536]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T09:49:08.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:07 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2652267837' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1870828082"}]: dispatch 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[52384]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2652267837' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1870828082"}]': finished 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[52384]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3228128154' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]: dispatch 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[52384]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]: dispatch 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[56720]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2652267837' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1870828082"}]': finished 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[56720]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3228128154' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]: dispatch 2026-03-10T09:49:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:08 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]: dispatch 2026-03-10T09:49:09.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:08 vm03 ceph-mon[50536]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:09.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2652267837' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1870828082"}]': finished 2026-03-10T09:49:09.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:08 vm03 ceph-mon[50536]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T09:49:09.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3228128154' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]: dispatch 2026-03-10T09:49:09.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:08 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]: dispatch 2026-03-10T09:49:09.797 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:49:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:09] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]': finished 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[52384]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/1948897248' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1008100373"}]: dispatch 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]': finished 2026-03-10T09:49:10.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[56720]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T09:49:10.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:09 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1948897248' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1008100373"}]: dispatch 2026-03-10T09:49:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:09 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:49:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:09 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2246671766"}]': finished 2026-03-10T09:49:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:09 vm03 ceph-mon[50536]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T09:49:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:09 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1948897248' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1008100373"}]: dispatch 2026-03-10T09:49:11.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:10 vm03 ceph-mon[50536]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:11.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:10 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1948897248' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1008100373"}]': finished 2026-03-10T09:49:11.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:10 vm03 ceph-mon[50536]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T09:49:11.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:10 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2995848002' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3678185549"}]: dispatch 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[52384]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/1948897248' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1008100373"}]': finished 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[52384]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2995848002' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3678185549"}]: dispatch 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[56720]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1948897248' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1008100373"}]': finished 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[56720]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T09:49:11.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:10 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2995848002' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3678185549"}]: dispatch 2026-03-10T09:49:12.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:11 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2995848002' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3678185549"}]': finished 2026-03-10T09:49:12.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:11 vm03 ceph-mon[50536]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T09:49:12.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:11 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1018768423' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]: dispatch 2026-03-10T09:49:12.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:11 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]: dispatch 2026-03-10T09:49:12.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:11 vm03 ceph-mon[50536]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2995848002' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3678185549"}]': finished 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[52384]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1018768423' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]: dispatch 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[52384]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]: dispatch 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[52384]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2995848002' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3678185549"}]': finished 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[56720]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1018768423' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]: dispatch 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]: dispatch 2026-03-10T09:49:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:11 vm00 ceph-mon[56720]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 90 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:49:13.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:12 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]': finished 2026-03-10T09:49:13.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:12 vm03 ceph-mon[50536]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T09:49:13.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:12 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/13148230' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3678185549"}]: dispatch 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:13] "GET /metrics HTTP/1.1" 200 214477 "" "Prometheus/2.33.4" 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:12 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]': finished 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:12 vm00 ceph-mon[56720]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:12 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/13148230' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3678185549"}]: dispatch 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:12 vm00 ceph-mon[52384]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/607966756"}]': finished 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:12 vm00 ceph-mon[52384]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T09:49:13.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:12 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/13148230' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3678185549"}]: dispatch 2026-03-10T09:49:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:13.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:49:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:13.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:49:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:13.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:49:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:13.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:49:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain 
any IP SANs" 2026-03-10T09:49:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:49:14.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:13 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/13148230' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3678185549"}]': finished 2026-03-10T09:49:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:13 vm03 ceph-mon[50536]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T09:49:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:13 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/237754453' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045903313"}]: dispatch 2026-03-10T09:49:14.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:13 vm03 ceph-mon[50536]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/13148230' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3678185549"}]': finished 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[52384]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/237754453' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045903313"}]: dispatch 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[52384]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/13148230' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3678185549"}]': finished 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[56720]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/237754453' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045903313"}]: dispatch 2026-03-10T09:49:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:13 vm00 ceph-mon[56720]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:15.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:14 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/237754453' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045903313"}]': finished 2026-03-10T09:49:15.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:14 vm03 ceph-mon[50536]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T09:49:15.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:14 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/237754453' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045903313"}]': finished 2026-03-10T09:49:15.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:14 vm00 ceph-mon[52384]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T09:49:15.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:14 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/237754453' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045903313"}]': finished 2026-03-10T09:49:15.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:14 vm00 ceph-mon[56720]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T09:49:15.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:49:15] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]: dispatch 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.17", "id": [1, 2]}]: dispatch 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1d", "id": [1, 2]}]: dispatch 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:49:16.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:16 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.17", "id": [1, 2]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1d", "id": [1, 2]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.17", "id": [1, 2]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": 
"json", "pgid": "4.1d", "id": [1, 2]}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:49:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:16 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.17", "id": [1, 2]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1d", "id": [1, 2]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[52384]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.17", "id": [1, 2]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1d", "id": [1, 2]}]': finished 2026-03-10T09:49:17.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:17 vm00 ceph-mon[56720]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T09:49:17.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:17 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1f", "id": [7, 2]}]': finished 2026-03-10T09:49:17.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:17 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.6", "id": [1, 5]}]': finished 2026-03-10T09:49:17.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:17 vm03 ceph-mon[50536]: 
from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.17", "id": [1, 2]}]': finished 2026-03-10T09:49:17.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:17 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1d", "id": [1, 2]}]': finished 2026-03-10T09:49:17.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:17 vm03 ceph-mon[50536]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T09:49:18.142 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:18 vm00 ceph-mon[52384]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T09:49:18.142 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:18 vm00 ceph-mon[52384]: pgmap v67: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:18.143 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:18 vm00 ceph-mon[56720]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T09:49:18.143 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:18 vm00 ceph-mon[56720]: pgmap v67: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:18.487 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:49:18] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:49:18.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:18 vm03 ceph-mon[50536]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T09:49:18.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:18 vm03 ceph-mon[50536]: pgmap v67: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:49:19.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:49:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:19] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:49:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:19 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:49:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:19 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:19 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:19 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:49:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:19 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:19 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:19.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:19 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:49:19.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:19 vm00 ceph-mon[56720]: from='mgr.24302 
192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:19.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:19 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:49:20.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:20 vm03 ceph-mon[50536]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:49:20.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:20 vm00 ceph-mon[52384]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:49:20.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:20 vm00 ceph-mon[56720]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:49:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:22 vm00 ceph-mon[52384]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 704 B/s rd, 0 op/s 2026-03-10T09:49:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:22 vm00 ceph-mon[56720]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 704 B/s rd, 0 op/s 2026-03-10T09:49:23.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:22 vm03 ceph-mon[50536]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 704 B/s rd, 0 op/s 2026-03-10T09:49:23.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:23] "GET /metrics HTTP/1.1" 200 214493 "" "Prometheus/2.33.4" 2026-03-10T09:49:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:23.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:49:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:23.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:49:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:23.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] 
msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:49:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:23.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:49:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:23.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:49:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:23.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:49:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:24 vm00 ceph-mon[52384]: pgmap v70: 161 pgs: 1 active+recovering, 160 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 137 B/s, 0 objects/s recovering 2026-03-10T09:49:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:24 vm00 ceph-mon[56720]: pgmap v70: 161 pgs: 1 active+recovering, 160 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 137 B/s, 0 objects/s recovering 2026-03-10T09:49:25.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:24 vm03 ceph-mon[50536]: pgmap v70: 161 pgs: 1 active+recovering, 160 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 137 B/s, 0 objects/s recovering 2026-03-10T09:49:26.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:26 vm00 ceph-mon[52384]: pgmap v71: 161 pgs: 1 active+recovering, 160 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 120 B/s, 0 objects/s recovering 2026-03-10T09:49:26.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:26 vm00 ceph-mon[56720]: pgmap v71: 161 pgs: 1 active+recovering, 160 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 120 B/s, 0 objects/s recovering 2026-03-10T09:49:27.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:26 vm03 ceph-mon[50536]: pgmap v71: 161 pgs: 1 active+recovering, 160 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 120 B/s, 0 objects/s recovering 2026-03-10T09:49:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:28 vm00 ceph-mon[52384]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s; 108 B/s, 0 
objects/s recovering
2026-03-10T09:49:28.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:28 vm00 ceph-mon[56720]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s; 108 B/s, 0 objects/s recovering
2026-03-10T09:49:29.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:28 vm03 ceph-mon[50536]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s; 108 B/s, 0 objects/s recovering
2026-03-10T09:49:29.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:49:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:29] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:49:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:29 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:29 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:29 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:30 vm00 ceph-mon[52384]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:30 vm00 ceph-mon[56720]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:31.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:30 vm03 ceph-mon[50536]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:32 vm00 ceph-mon[52384]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:32.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:32 vm00 ceph-mon[56720]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:33.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:32 vm03 ceph-mon[50536]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:33.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:33] "GET /metrics HTTP/1.1" 200 214513 "" "Prometheus/2.33.4"
2026-03-10T09:49:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:33.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:33.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:33.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:33.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:34.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:34 vm00 ceph-mon[52384]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:34.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:34 vm00 ceph-mon[56720]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:35.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:34 vm03 ceph-mon[50536]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 91 B/s, 0 objects/s recovering
2026-03-10T09:49:35.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:35 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:49:35] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:49:36.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:36 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:36.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:36 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:36.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:36 vm03 ceph-mon[50536]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:36.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:36 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:36.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:36 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:36.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:36 vm00 ceph-mon[52384]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:36.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:36 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:36.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:36 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:36.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:36 vm00 ceph-mon[56720]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:38.377 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:38 vm00 ceph-mon[52384]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:38.377 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:49:38] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:49:38.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:38 vm00 ceph-mon[56720]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:38.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:38 vm03 ceph-mon[50536]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:39.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:39 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:39.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:39 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:39.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:39 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:39.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:39 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:39.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:39 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:39.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:39 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:39.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:49:39 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:39] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:49:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:39 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:39 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:39 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:40 vm03 ceph-mon[50536]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:40 vm00 ceph-mon[52384]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:40.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:40 vm00 ceph-mon[56720]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:42.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:42 vm00 ceph-mon[52384]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:42.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:42 vm00 ceph-mon[56720]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:43.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:42 vm03 ceph-mon[50536]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:43.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:43] "GET /metrics HTTP/1.1" 200 214513 "" "Prometheus/2.33.4"
2026-03-10T09:49:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:43.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:43.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:43.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:43.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:43.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:43.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:44 vm00 ceph-mon[52384]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:44 vm00 ceph-mon[56720]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:45.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:44 vm03 ceph-mon[50536]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:46 vm00 ceph-mon[52384]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:46 vm00 ceph-mon[56720]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:47.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:46 vm03 ceph-mon[50536]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:48 vm00 ceph-mon[52384]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:48 vm00 ceph-mon[56720]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:49.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:48 vm03 ceph-mon[50536]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:49.626 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:49:49 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:49] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:49:50.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:49 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:49 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:49 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:51.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:50 vm03 ceph-mon[50536]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:50 vm00 ceph-mon[52384]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:50 vm00 ceph-mon[56720]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:52 vm03 ceph-mon[50536]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:53.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:52 vm00 ceph-mon[52384]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:53.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:52 vm00 ceph-mon[56720]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:53.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:53] "GET /metrics HTTP/1.1" 200 214490 "" "Prometheus/2.33.4"
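[Editor's note: every Alertmanager failure in this log is the same fault repeating: the dashboard's TLS certificate carries no IP subjectAltName entries, so Go's TLS stack refuses to validate it when the webhook target is a bare IP (192.168.123.100/.103) rather than a hostname. A minimal sketch, assuming the Python cryptography package, of generating a self-signed certificate that would satisfy this check; the key size, validity window, and output path are illustrative assumptions, not values from this run:

    import datetime
    import ipaddress
    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    # Illustrative only: a self-signed cert whose SAN lists the dashboard IPs,
    # which is exactly what the "doesn't contain any IP SANs" error is asking for.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    subject = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "ceph-dashboard")])
    san = x509.SubjectAlternativeName([
        x509.IPAddress(ipaddress.ip_address("192.168.123.100")),
        x509.IPAddress(ipaddress.ip_address("192.168.123.103")),
    ])
    now = datetime.datetime.now(datetime.timezone.utc)
    cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(subject)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=365))
        .add_extension(san, critical=False)
        .sign(key, hashes.SHA256())
    )
    with open("dashboard.crt", "wb") as f:
        f.write(cert.public_bytes(serialization.Encoding.PEM))

Installing such a certificate into the dashboard (e.g. via ceph dashboard set-ssl-certificate) would be the remediation, but the exact procedure is outside what this log shows.]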
2026-03-10T09:49:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:53.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:49:53.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:53.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:53.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:53.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:49:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:49:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:49:53.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:49:55.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:54 vm03 ceph-mon[50536]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:54 vm00 ceph-mon[52384]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:55.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:54 vm00 ceph-mon[56720]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:55.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:49:55] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:49:56.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:56 vm03 ceph-mon[50536]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:56.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:56.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:56 vm00 ceph-mon[52384]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:56 vm00 ceph-mon[56720]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:49:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:58.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:58 vm00 ceph-mon[52384]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:58.469 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:49:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:49:58] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:49:58.469 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:58 vm00 ceph-mon[56720]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:58.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:58 vm03 ceph-mon[50536]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:49:59.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:49:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:49:59] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:49:59.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:59 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:59.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:59.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:49:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:59.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:59 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:59.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:59.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:49:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:59.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:59 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:49:59.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:49:59.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:49:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:00 vm03 ceph-mon[50536]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:50:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:50:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:00 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:00.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:00 vm03 ceph-mon[50536]: overall HEALTH_OK
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[52384]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[52384]: overall HEALTH_OK
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[56720]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:00.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:00 vm00 ceph-mon[56720]: overall HEALTH_OK
2026-03-10T09:50:02.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:02 vm00 ceph-mon[52384]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:02.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:02 vm00 ceph-mon[56720]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:02 vm03 ceph-mon[50536]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:03.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:03] "GET /metrics HTTP/1.1" 200 214462 "" "Prometheus/2.33.4"
2026-03-10T09:50:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:03.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:03.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:03.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:04.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:04 vm00 ceph-mon[52384]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:04.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:04 vm00 ceph-mon[56720]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:05.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:04 vm03 ceph-mon[50536]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:06.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:06 vm00 ceph-mon[52384]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:06.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:06 vm00 ceph-mon[56720]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:07.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:06 vm03 ceph-mon[50536]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:08.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:08 vm00 ceph-mon[52384]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:08.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:08 vm00 ceph-mon[56720]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:09.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:08 vm03 ceph-mon[50536]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:09.623 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:50:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:09] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:50:10.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:09 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:10.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:09 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:10.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:09 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:11.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:10 vm03 ceph-mon[50536]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:11.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:10 vm00 ceph-mon[52384]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:11.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:10 vm00 ceph-mon[56720]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:12 vm03 ceph-mon[50536]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:13.088 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:12 vm00 ceph-mon[52384]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:13.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:12 vm00 ceph-mon[56720]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:13.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:13] "GET /metrics HTTP/1.1" 200 214462 "" "Prometheus/2.33.4"
2026-03-10T09:50:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:13.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:13.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:13.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:13.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:13.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:14 vm03 ceph-mon[50536]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:15.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:14 vm00 ceph-mon[52384]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:15.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:14 vm00 ceph-mon[56720]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:15.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:50:15] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:50:16.028 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:50:16.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:50:16.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:16.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:16.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:17.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:16 vm03 ceph-mon[50536]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:17.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:16 vm00 ceph-mon[52384]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:17.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:16 vm00 ceph-mon[56720]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:18.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:18 vm00 ceph-mon[52384]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:18.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:18 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:18.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:18 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:18.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:18 vm00 ceph-mon[56720]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:18.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:18 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:18.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:18 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:18.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:50:18] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:50:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:18 vm03 ceph-mon[50536]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:18 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:18 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:19.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:50:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:19] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:50:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:19 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:20.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:19 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:20.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:19 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:21.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:20 vm03 ceph-mon[50536]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:21.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:20 vm00 ceph-mon[52384]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:21.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:20 vm00 ceph-mon[56720]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:22 vm03 ceph-mon[50536]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:23.089 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:22 vm00 ceph-mon[52384]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:23.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:22 vm00 ceph-mon[56720]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:23.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:23] "GET /metrics HTTP/1.1" 200 214480 "" "Prometheus/2.33.4"
2026-03-10T09:50:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:23.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:23.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:23.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:23.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:23.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:23.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:25.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:24 vm03 ceph-mon[50536]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:25.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:24 vm00 ceph-mon[52384]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:25.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:24 vm00 ceph-mon[56720]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:27.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:26 vm03 ceph-mon[50536]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:27.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:26 vm00 ceph-mon[52384]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:27.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:26 vm00 ceph-mon[56720]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:29.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:28 vm03 ceph-mon[50536]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:29.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:28 vm00 ceph-mon[52384]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:29.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:28 vm00 ceph-mon[56720]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:29.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:50:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:29] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:50:30.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:29 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:30.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:29 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:30.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:29 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:31.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:30 vm03 ceph-mon[50536]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:31.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:30 vm00 ceph-mon[52384]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:31.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:30 vm00 ceph-mon[56720]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:33.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:32 vm03 ceph-mon[50536]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:33.088 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:32 vm00 ceph-mon[52384]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:33.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:32 vm00 ceph-mon[56720]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:33.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:33] "GET /metrics HTTP/1.1" 200 214494 "" "Prometheus/2.33.4"
2026-03-10T09:50:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:33.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:33.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:33.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:33.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:35.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:34 vm03 ceph-mon[50536]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:34 vm00 ceph-mon[52384]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:35.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:34 vm00 ceph-mon[56720]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:35.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:35 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:50:35] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:50:36.388 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:36 vm03 ceph-mon[50536]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:36.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:36 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:36.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:36 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:36 vm00 ceph-mon[52384]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:36 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:36 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:36.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:36 vm00 ceph-mon[56720]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:36.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:36 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:36.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:36 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:38.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:38 vm00 ceph-mon[52384]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:38.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:38 vm00 ceph-mon[56720]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:38.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:38 vm03 ceph-mon[50536]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:39.046 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:50:38] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:50:39.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:50:39 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:39] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:50:39.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:39 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:39.919 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:39 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:39.919 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:39 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:39.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:39 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:39.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:39 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:39.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:39 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:40.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:39 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:50:40.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:39 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:40.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:39 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:50:41.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:40 vm03 ceph-mon[50536]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:40 vm00 ceph-mon[52384]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:40 vm00 ceph-mon[56720]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:50:43.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:42 vm03 ceph-mon[50536]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:43.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:42 vm00 ceph-mon[52384]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:43.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:42 vm00 ceph-mon[56720]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:50:43.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:43] "GET /metrics HTTP/1.1" 200 214494 "" "Prometheus/2.33.4"
2026-03-10T09:50:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:43.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:43.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:50:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:43.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:50:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:43.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't
contain any IP SANs" 2026-03-10T09:50:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:43.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:50:45.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:44 vm03 ceph-mon[50536]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:45.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:44 vm00 ceph-mon[52384]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:45.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:44 vm00 ceph-mon[56720]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:47.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:46 vm03 ceph-mon[50536]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:47.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:46 vm00 ceph-mon[52384]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:47.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:46 vm00 ceph-mon[56720]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:48 vm03 ceph-mon[50536]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:49.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:48 vm00 ceph-mon[52384]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:49.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:48 vm00 ceph-mon[56720]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:49.768 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:50:49 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:49] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:50:50.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:49 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:50:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:49 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:50:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:49 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:50:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:50 vm03 ceph-mon[50536]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 
op/s 2026-03-10T09:50:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:50 vm00 ceph-mon[52384]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:50 vm00 ceph-mon[56720]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:50:53.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:52 vm03 ceph-mon[50536]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:53.088 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:52 vm00 ceph-mon[52384]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:53.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:52 vm00 ceph-mon[56720]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:53.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:53] "GET /metrics HTTP/1.1" 200 214499 "" "Prometheus/2.33.4" 2026-03-10T09:50:53.691 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force' 2026-03-10T09:50:53.838 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:53.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:50:53.838 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:50:53.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:50:53.838 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:50:53.838 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:50:53.838 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:53.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:50:53.838 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:50:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:50:53.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:50:54.203 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force' 2026-03-10T09:50:54.696 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set global log_to_journald false --force' 2026-03-10T09:50:54.930 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:54 vm00 ceph-mon[52384]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:50:54.930 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:54 vm00 ceph-mon[56720]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:50:55.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:54 vm03 ceph-mon[50536]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:50:55.203 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-10T09:50:55.660 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:55.692 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:50:55] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:50:55.722 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T09:50:55.724 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local 2026-03-10T09:50:55.724 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done' 2026-03-10T09:50:56.213 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:50:56.555 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 17s ago 4m - - 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 1s ago 4m - - 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 17s ago 2m 25.2M - ba2b418f427c 276d5952c165 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (2m) 1s ago 2m 45.5M - 8.3.5 dad864ee21e9 738bbf68f8ee 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (118s) 17s ago 118s 43.8M - 3.5 e1d6a67b021e 05c881f80b76 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443 running (4m) 1s ago 4m 419M - 17.2.0 e1d6a67b021e c7a9ee9d4177 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (4m) 17s ago 4m 472M - 17.2.0 e1d6a67b021e 1120e4152adf 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (4m) 17s ago 4m 55.3M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (4m) 1s ago 4m 40.6M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (4m) 17s ago 4m 54.1M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (2m) 17s ago 2m 18.9M - 1dbe0e931976 dfbf18fec33b 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (2m) 1s ago 2m 20.7M - 1dbe0e931976 e8ec5e902bbc 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (3m) 17s ago 3m 52.6M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (3m) 17s ago 3m 51.0M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (3m) 17s ago 3m 46.1M 4096M 17.2.0 e1d6a67b021e dc86a99a0403 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (3m) 17s ago 3m 48.0M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18 
2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (3m) 1s ago 3m 50.7M 4096M 17.2.0 e1d6a67b021e 76735d749d5c 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (3m) 1s ago 3m 49.4M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (3m) 1s ago 3m 47.2M 4096M 17.2.0 e1d6a67b021e d485462ed497 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (2m) 1s ago 2m 48.4M 4096M 17.2.0 e1d6a67b021e 9271ca589720 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (2m) 1s ago 2m 53.3M - 514e6a882f6e 650ef4d78b8a 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (2m) 17s ago 2m 90.2M - 17.2.0 e1d6a67b021e a1037186db7f 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (2m) 1s ago 2m 91.5M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (2m) 17s ago 2m 90.8M - 17.2.0 e1d6a67b021e 3ac258c6b805 2026-03-10T09:50:56.556 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (2m) 1s ago 2m 88.4M - 17.2.0 e1d6a67b021e 5cd6eb4d6619 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='client.14853 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[52384]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='client.14853 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:56 vm00 ceph-mon[56720]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T09:50:56.752 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T09:50:56.753 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:50:56.753 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T09:50:56.753 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-10T09:50:56.753 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:50:56.753 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:50:56.797 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:50:56.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='client.14853 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:56.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:56 vm03 ceph-mon[50536]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "", 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image" 2026-03-10T09:50:56.929 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:50:57.132 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T09:50:57.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:57 vm03 ceph-mon[50536]: from='client.24763 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:57 vm03 ceph-mon[50536]: from='client.14862 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:57 vm03 ceph-mon[50536]: from='client.24775 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:57 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/2013430335' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:50:57.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:57 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1972872701' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[52384]: from='client.24763 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[52384]: from='client.14862 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[52384]: from='client.24775 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2013430335' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1972872701' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[56720]: from='client.24763 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[56720]: from='client.14862 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[56720]: from='client.24775 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2013430335' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:50:57.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:57 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/1972872701' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:50:58.482 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:58 vm00 ceph-mon[52384]: from='client.14877 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:58.482 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:58 vm00 ceph-mon[52384]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:58.748 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:58 vm00 ceph-mon[56720]: from='client.14877 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:58.748 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:58 vm00 ceph-mon[56720]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:58.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:58 vm03 ceph-mon[50536]: from='client.14877 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:50:58.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:58 vm03 ceph-mon[50536]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:50:59.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:50:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:50:58] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9" 2026-03-10T09:50:59.740 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:50:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:50:59] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:50:59.998 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:59 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:50:59.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:59.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:50:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:59.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:59 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:50:59.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:50:59.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:50:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:51:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:59 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:51:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:51:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:50:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 
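The cephadm.shell task above polls the upgrade by looping on ceph orch upgrade status until in_progress goes false or the status message mentions an error. Untangled from the teuthology shell quoting, the loop amounts to the following bash sketch (assuming ceph and jq are available inside the shell container; the 30-second interval and the four dumped commands match the task as logged, while the grep -q flags are editorial tidying):

    # Poll until the upgrade completes or its status message contains "Error".
    while ceph orch upgrade status | jq '.in_progress' | grep -q true \
          && ! ceph orch upgrade status | jq '.message' | grep -q Error; do
        ceph orch ps              # per-daemon version/image, as in the table above
        ceph versions             # cluster-wide version histogram
        ceph orch upgrade status  # target image, progress, message
        ceph health detail
        sleep 30
    done

Its first iteration is visible above: in_progress is true, all 17 daemons still report 17.2.0, and the status message is the initial pull of the target image.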
2026-03-10T09:51:01.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:00 vm03 ceph-mon[50536]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:00 vm00 ceph-mon[56720]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:01.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:00 vm00 ceph-mon[52384]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:02 vm03 ceph-mon[50536]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:03.088 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:02 vm00 ceph-mon[52384]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:03.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:02 vm00 ceph-mon[56720]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:03.088 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:03] "GET /metrics HTTP/1.1" 200 214517 "" "Prometheus/2.33.4" 2026-03-10T09:51:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:03.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:51:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:03.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" 
num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:03.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:03.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:51:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:04 vm03 ceph-mon[50536]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:04 vm00 ceph-mon[52384]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:04.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:04 vm00 ceph-mon[56720]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:06.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:06 vm00 ceph-mon[52384]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:06.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:06 vm00 ceph-mon[56720]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:07.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:06 vm03 ceph-mon[50536]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:09.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:08 vm03 ceph-mon[50536]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:08 vm00 ceph-mon[52384]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:08 vm00 ceph-mon[56720]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:09.747 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - 
[10/Mar/2026:09:51:09] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:51:10.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:09 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:51:10.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:09 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:51:10.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:09 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:51:11.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:10 vm03 ceph-mon[50536]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:11.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:10 vm00 ceph-mon[52384]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:11.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:10 vm00 ceph-mon[56720]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:13.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:12 vm03 ceph-mon[50536]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:13.089 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:12 vm00 ceph-mon[52384]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:13.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:12 vm00 ceph-mon[56720]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:13.089 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:13] "GET /metrics HTTP/1.1" 200 214517 "" "Prometheus/2.33.4" 2026-03-10T09:51:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:13.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:51:13.869 
INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:13.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:13.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:51:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs" 2026-03-10T09:51:13.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:51:15.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:14 vm03 ceph-mon[50536]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:15.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:14 vm00 ceph-mon[52384]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:15.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:14 vm00 ceph-mon[56720]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:51:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:51:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:51:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 
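The recurring alertmanager failures in this stretch of the log are all one condition: the ceph-dashboard receiver posts alerts to the dashboard API by raw IP, but the dashboard's self-signed certificate carries no IP Subject Alternative Names, so Go's x509 validation rejects the connection and every notify attempt is retried until canceled. (Webhook[0]'s URL also shows a doubled slash, ...:8443//api/prometheus_receiver, preserved verbatim from the configured endpoint.) These notifications do not block the upgrade; the health check above still reports HEALTH_OK while they fire. One way to confirm the missing SANs from a cluster host, assuming openssl 1.1.1 or newer is installed, is:

    # Dump the SAN extension of the dashboard certificate; no IP entries
    # (or no subjectAltName at all) reproduces the x509 failure above.
    openssl s_client -connect 192.168.123.100:8443 </dev/null 2>/dev/null \
        | openssl x509 -noout -ext subjectAltName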
2026-03-10T09:51:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:15.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:51:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:51:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:15 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:15.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:51:15] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:51:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:51:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:51:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:15 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:17.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:16 vm03 ceph-mon[50536]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:17.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:16 vm00 ceph-mon[52384]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:17.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:16 vm00 ceph-mon[56720]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:18 vm03 ceph-mon[50536]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:19.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:18 vm00 ceph-mon[56720]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:18 vm00 ceph-mon[52384]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:19.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:51:18] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:51:19.797 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:19] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:51:20.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:19 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:20.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:19 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:20.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:19 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:20.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:19 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:20.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:19 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:20.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:19 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:20.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:19 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:19 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:19 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:21.107 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:20 vm00 ceph-mon[52384]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:21.107 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:20 vm00 ceph-mon[56720]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:21.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:20 vm03 ceph-mon[50536]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:22.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:21 vm00 ceph-mon[52384]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:22.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:21 vm00 ceph-mon[56720]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:22.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:21 vm03 ceph-mon[50536]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:23.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:23] "GET /metrics HTTP/1.1" 200 214491 "" "Prometheus/2.33.4"
2026-03-10T09:51:23.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:23.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:23.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:23.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:23.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:23.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:23.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:23.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:23.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:25.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:25 vm03 ceph-mon[50536]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
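
The alertmanager failures above repeat for the rest of the run: the dashboard's self-signed certificate carries no IP subjectAltName entries, so TLS verification of the webhook targets https://192.168.123.100:8443 and https://192.168.123.103:8443 fails (the doubled slash in the .100 URL is also visible, but the x509 error is what cancels the notify). A minimal diagnostic sketch for such an endpoint, assuming the third-party cryptography package is available; hosts and port are taken from the log:

# check_san.py - report whether a TLS endpoint's certificate carries IP SANs.
import ssl
from cryptography import x509

def ip_sans(host: str, port: int = 8443):
    # Fetch the peer certificate without verifying it (it is self-signed).
    pem = ssl.get_server_certificate((host, port))
    cert = x509.load_pem_x509_certificate(pem.encode())
    try:
        san = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
    except x509.ExtensionNotFound:
        return []
    return san.value.get_values_for_type(x509.IPAddress)

for host in ("192.168.123.100", "192.168.123.103"):
    print(host, ip_sans(host))  # an empty list reproduces the "no IP SANs" failure
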
2026-03-10T09:51:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:25 vm00 ceph-mon[52384]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:25 vm00 ceph-mon[56720]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:27.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:26 vm03 ceph-mon[50536]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:27.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:26 vm00 ceph-mon[52384]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:27.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:26 vm00 ceph-mon[56720]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:27.386 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 8s ago 4m - -
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 12s ago 4m - -
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 8s ago 3m 25.3M - ba2b418f427c 276d5952c165
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (2m) 12s ago 2m 49.1M - 8.3.5 dad864ee21e9 738bbf68f8ee
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (2m) 8s ago 2m 44.2M - 3.5 e1d6a67b021e 05c881f80b76
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443 running (4m) 12s ago 4m 419M - 17.2.0 e1d6a67b021e c7a9ee9d4177
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (5m) 8s ago 5m 473M - 17.2.0 e1d6a67b021e 1120e4152adf
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 8s ago 5m 46.7M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (4m) 12s ago 4m 40.6M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (4m) 8s ago 4m 43.3M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (3m) 8s ago 3m 17.3M - 1dbe0e931976 dfbf18fec33b
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (3m) 12s ago 3m 20.9M - 1dbe0e931976 e8ec5e902bbc
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 8s ago 4m 53.2M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (4m) 8s ago 4m 51.7M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (4m) 8s ago 4m 46.8M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (3m) 8s ago 3m 48.8M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (3m) 12s ago 3m 51.2M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (3m) 12s ago 3m 50.0M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (3m) 12s ago 3m 47.4M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (3m) 12s ago 3m 48.6M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (2m) 12s ago 2m 57.5M - 514e6a882f6e 650ef4d78b8a
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (2m) 8s ago 2m 90.4M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (2m) 12s ago 2m 91.6M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (2m) 8s ago 2m 91.0M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:51:27.754 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (2m) 12s ago 2m 88.5M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:51:28.397 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:51:28.397 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:51:28.397 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:51:28.397 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:28.397 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:51:28.398 INFO:teuthology.orchestra.run.vm00.stdout:}
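
The ceph versions output above shows all 17 reporting daemons (3 mon, 2 mgr, 8 osd, 4 rgw) still on 17.2.0 quincy before any daemon has been redeployed. A harness can wait for convergence on the target release by polling this JSON; a minimal sketch, with the target string an illustrative assumption:

# wait_versions.py - poll `ceph versions` until every daemon reports the target release.
import json
import subprocess
import time

def overall_versions() -> dict:
    out = subprocess.run(["ceph", "versions"], capture_output=True,
                         text=True, check=True).stdout
    return json.loads(out)["overall"]

def converged(target: str) -> bool:
    overall = overall_versions()
    # Converged once a single version string remains and it is the target.
    return len(overall) == 1 and all(target in v for v in overall)

while not converged("19.2.3"):  # hypothetical target for this run
    time.sleep(30)
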
"target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:28 vm03 ceph-mon[50536]: from='client.14889 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:28 vm03 ceph-mon[50536]: from='client.14895 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[52384]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[52384]: from='client.24796 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[52384]: from='client.14889 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[52384]: from='client.14895 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[56720]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[56720]: from='client.24796 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[56720]: from='client.14889 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:28 vm00 ceph-mon[56720]: from='client.14895 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "", 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image" 2026-03-10T09:51:28.592 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:51:28.824 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T09:51:29.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:29] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:51:29.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:29 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:51:29.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:29 
2026-03-10T09:51:29.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:29] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:51:29.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:29 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:29.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:29 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4230220368' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:51:29.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:29 vm03 ceph-mon[50536]: from='client.14907 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:51:29.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:29 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2939849486' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4230220368' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[52384]: from='client.14907 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2939849486' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4230220368' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[56720]: from='client.14907 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:51:29.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:29 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2939849486' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown)
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc']
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:51:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:30 vm03 ceph-mon[50536]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm03
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown)
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc']
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[52384]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm03
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown)
2026-03-10T09:51:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc']
2026-03-10T09:51:30.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:51:30.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:30.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:51:30.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:30 vm00 ceph-mon[56720]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm03
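
At this point the cluster log records the resolved upgrade target: version 19.2.3-678-ge911bdeb and the repo digest the image tag resolved to, which is what the orchestrator actually deploys. If a check needs those values, a regex over the journal lines is enough; a sketch, with the sample line copied from the output above:

# parse_target.py - extract the target image and repo digest from a cluster log line.
import re

line = ("Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:"
        "e911bdebe5c8faa3800735d1568fcdca65db60df, digests "
        "['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc']")

m = re.search(r"Target container is (\S+), digests \['([^']+)'\]", line)
if m:
    image, digest = m.groups()
    print(image)   # the tag-addressed image
    print(digest)  # the digest-addressed form actually deployed
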
2026-03-10T09:51:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:32 vm00 ceph-mon[52384]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:32.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:32 vm00 ceph-mon[56720]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:33.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:32 vm03 ceph-mon[50536]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:33.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:33] "GET /metrics HTTP/1.1" 200 214447 "" "Prometheus/2.33.4"
2026-03-10T09:51:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:33.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:33.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:34 vm00 ceph-mon[52384]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:35.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:34 vm00 ceph-mon[56720]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:34 vm03 ceph-mon[50536]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:36.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:35 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:51:35] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:51:37.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:36 vm03 ceph-mon[50536]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:37.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:36 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:37.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:36 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:36 vm00 ceph-mon[52384]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:36 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:36 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:36 vm00 ceph-mon[56720]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:36 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:36 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:39.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:38 vm03 ceph-mon[50536]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:39.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:38 vm00 ceph-mon[52384]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:39.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:38 vm00 ceph-mon[56720]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:39.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:51:38] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:51:39.688 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:39 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:39] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:51:40.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:39 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:40.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:39 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:40.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:39 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:40.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:39 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:40.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:39 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:40.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:39 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:40.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:39 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:40.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:39 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:40.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:39 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:41.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:40 vm03 ceph-mon[50536]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:40 vm00 ceph-mon[52384]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:40 vm00 ceph-mon[56720]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:42.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:41 vm03 ceph-mon[50536]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:42.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:41 vm00 ceph-mon[56720]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:41 vm00 ceph-mon[52384]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:43.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:43] "GET /metrics HTTP/1.1" 200 214447 "" "Prometheus/2.33.4"
2026-03-10T09:51:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:43.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:43.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:43.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:43.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:44.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:44 vm03 ceph-mon[50536]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:44.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:44 vm00 ceph-mon[52384]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:44.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:44 vm00 ceph-mon[56720]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:46 vm00 ceph-mon[52384]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:46 vm00 ceph-mon[56720]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:47.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:46 vm03 ceph-mon[50536]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:48 vm00 ceph-mon[52384]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:48 vm00 ceph-mon[56720]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:49.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:48 vm03 ceph-mon[50536]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:49.620 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:49 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:49] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:51:49.621 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:49 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:49 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:49 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:51.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:50 vm03 ceph-mon[50536]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:50 vm00 ceph-mon[52384]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:50 vm00 ceph-mon[56720]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:52 vm03 ceph-mon[50536]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:53.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:52 vm00 ceph-mon[52384]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:53.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:52 vm00 ceph-mon[56720]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:53.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:53] "GET /metrics HTTP/1.1" 200 214449 "" "Prometheus/2.33.4"
2026-03-10T09:51:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:53.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:53.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:53.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:51:53.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:51:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:51:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:51:53.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:51:55.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:54 vm03 ceph-mon[50536]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:54 vm00 ceph-mon[52384]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:55.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:54 vm00 ceph-mon[56720]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:56.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:56 vm03 ceph-mon[50536]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:56.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:56.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:56 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:56 vm00 ceph-mon[52384]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:56 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.103 - - [10/Mar/2026:09:51:56] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:56 vm00 ceph-mon[56720]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:56.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:56 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:58.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:58 vm03 ceph-mon[50536]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:58 vm00 ceph-mon[52384]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:58 vm00 ceph-mon[56720]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:51:59.016 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:51:59.274 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:59 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:59.274 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:59.274 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:51:59 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:59.274 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:51:59 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 192.168.123.100 - - [10/Mar/2026:09:51:59] "POST /data HTTP/1.1" 200 46 "" "Python-urllib/3.9"
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 0s ago 5m - -
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 3s ago 5m - -
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (3m) 0s ago 3m 25.3M - ba2b418f427c 276d5952c165
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (3m) 3s ago 3m 49.1M - 8.3.5 dad864ee21e9 738bbf68f8ee
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (3m) 0s ago 3m 47.3M - 3.5 e1d6a67b021e 05c881f80b76
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443 running (5m) 3s ago 5m 419M - 17.2.0 e1d6a67b021e c7a9ee9d4177
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (5m) 0s ago 5m 475M - 17.2.0 e1d6a67b021e 1120e4152adf
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 0s ago 5m 47.7M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (5m) 3s ago 5m 40.3M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (5m) 0s ago 5m 43.2M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (3m) 0s ago 3m 18.7M - 1dbe0e931976 dfbf18fec33b
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (3m) 3s ago 3m 21.1M - 1dbe0e931976 e8ec5e902bbc
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 0s ago 4m 53.5M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (4m) 0s ago 4m 52.0M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (4m) 0s ago 4m 47.2M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (4m) 0s ago 4m 49.2M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (4m) 3s ago 4m 51.4M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (4m) 3s ago 4m 50.4M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (4m) 3s ago 4m 48.1M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:51:59.352 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (3m) 3s ago 3m 49.3M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:51:59.353 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (3m) 3s ago 3m 58.1M - 514e6a882f6e 650ef4d78b8a
2026-03-10T09:51:59.353 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (3m) 0s ago 3m 90.6M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:51:59.353 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (3m) 3s ago 3m 91.8M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:51:59.353 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (3m) 0s ago 3m 91.2M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:51:59.353 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (3m) 3s ago 3m 88.7M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
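
This second ceph orch ps listing, taken after the upgrade has started, still shows every versioned daemon on 17.2.0, which matches the "0/23 daemons upgraded" progress reported just below. Rather than scraping the fixed-width table, the same data can be read as JSON; a sketch of a per-version tally (--format json is a standard option on the orchestrator CLI):

# ps_versions.py - tally cephadm daemons by the version they report.
from collections import Counter
import json
import subprocess

out = subprocess.run(["ceph", "orch", "ps", "--format", "json"],
                     capture_output=True, text=True, check=True).stdout
daemons = json.loads(out)
tally = Counter(d.get("version") or "unknown" for d in daemons)
print(tally)  # keys depend on which daemon types report a version (agents do not)
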
2026-03-10T09:51:59.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:59 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:59.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:59.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:51:59 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:59.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:51:59 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:51:59] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:51:59.555 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:59 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:51:59.555 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:59.555 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:51:59 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y'
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:51:59.556 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [],
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "0/23 daemons upgraded",
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image on host vm03"
2026-03-10T09:51:59.726 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:51:59.937 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK
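
Each status poll above is paired with a "health detail" dispatch, and this checkpoint again ends in HEALTH_OK: the run gates on cluster health between polls so a degraded cluster fails fast instead of timing out later. A minimal sketch of such a gate:

# health_gate.py - fail unless the cluster currently reports HEALTH_OK.
import json
import subprocess

out = subprocess.run(["ceph", "health", "--format", "json"],
                     capture_output=True, text=True, check=True).stdout
status = json.loads(out)["status"]
assert status == "HEALTH_OK", f"cluster health degraded: {status}"
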
2026-03-10T09:52:00.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: from='client.24826 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: from='client.24829 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: from='client.24832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3601134230' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:52:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: from='client.14937 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:00 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4281794864' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: from='client.24826 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: from='client.24829 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: from='client.24832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3601134230' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: from='client.14937 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4281794864' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: from='client.24826 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: from='client.24829 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: from='client.24832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3601134230' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: from='client.14937 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:52:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:00 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4281794864' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:52:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:02 vm03 ceph-mon[50536]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:52:02.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:02 vm00 ceph-mon[52384]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:52:02.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:02 vm00 ceph-mon[56720]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:52:03.088 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:03] "GET /metrics HTTP/1.1" 200 214446 "" "Prometheus/2.33.4"
2026-03-10T09:52:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:52:03.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:52:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:03.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:52:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:03.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:52:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:52:03.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:52:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:03.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:52:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:03.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:52:05.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:04 vm03 ceph-mon[50536]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:04 vm00 ceph-mon[52384]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:05.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:04 vm00 ceph-mon[56720]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:06.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:06 vm00 ceph-mon[52384]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:06.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:06 vm00 ceph-mon[56720]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:52:06.797
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:06 vm03 ceph-mon[50536]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:52:09.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:08 vm03 ceph-mon[50536]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:52:09.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:08 vm00 ceph-mon[52384]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:52:09.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:08 vm00 ceph-mon[56720]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:52:09.446 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[51700]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:09] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:52:09.713 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 systemd[1]: Stopping Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:52:09.714 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 bash[71558]: Error: no container with name or ID "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr.x" found: no such container 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 podman[71564]: 2026-03-10 09:52:09.713869521 +0000 UTC m=+0.051861690 container died c7a9ee9d417710f884d3f9b288b6d9c692cb105b204b2daadf77ccf191e542dd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, ceph=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, distribution-scope=public, vcs-type=git, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, architecture=x86_64, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, name=centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, release=754, com.redhat.component=centos-stream-container, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 podman[71564]: 2026-03-10 09:52:09.749232221 +0000 UTC m=+0.087224380 container remove c7a9ee9d417710f884d3f9b288b6d9c692cb105b204b2daadf77ccf191e542dd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.component=centos-stream-container, distribution-scope=public, RELEASE=HEAD, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, io.openshift.expose-services=, maintainer=Guillaume Abrioux , name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, release=754, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., vcs-type=git) 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 bash[71564]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 bash[71583]: Error: no container with name or ID "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr.x" found: no such container 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 systemd[1]: Stopped Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Consumed 11.064s CPU time. 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:09 vm03 systemd[1]: Starting Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
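The probes captured above tell the story so far: ceph versions still reports every daemon on 17.2.0 (quincy), ceph orch upgrade status shows the squid target image being pulled with 0/23 daemons upgraded (the 23 counts every cephadm-managed daemon, including the monitoring stack, not just the 17 core ceph daemons), the cluster is HEALTH_OK, and cephadm starts cycling daemons with the standby mgr.x, which is the podman stop/remove sequence just logged. A minimal sketch of the same checks run by hand, assuming an admin shell on a mon host (e.g. via cephadm shell):

$ ceph versions              # per-daemon version histogram, as in the first JSON payload above
$ ceph orch upgrade status   # target_image, in_progress, progress counter, current message
$ ceph orch ps               # image and version actually running for each managed daemon
$ ceph health detail         # the run polls health alongside the upgrade, as seen in the mon dispatches
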
2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: Upgrade: Updating mgr.x 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: Deploying daemon mgr.x on vm03 2026-03-10T09:52:09.972 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:09 vm03 ceph-mon[50536]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:52:10.226 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 podman[71665]: 2026-03-10 09:52:10.072421265 +0000 UTC m=+0.020230846 container create 2d74d7e9d58313aeb78de5c1565c04b64e95c3c881307e254e9858120fb7d1e1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:52:10.226 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 podman[71665]: 2026-03-10 09:52:10.106953631 +0000 UTC m=+0.054763232 container init 2d74d7e9d58313aeb78de5c1565c04b64e95c3c881307e254e9858120fb7d1e1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.vendor=CentOS, CEPH_REF=squid) 2026-03-10T09:52:10.226 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 podman[71665]: 2026-03-10 09:52:10.109528583 +0000 UTC m=+0.057338165 container start 2d74d7e9d58313aeb78de5c1565c04b64e95c3c881307e254e9858120fb7d1e1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3) 2026-03-10T09:52:10.226 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 bash[71665]: 2d74d7e9d58313aeb78de5c1565c04b64e95c3c881307e254e9858120fb7d1e1 2026-03-10T09:52:10.226 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 podman[71665]: 2026-03-10 09:52:10.062190777 +0000 UTC m=+0.010000368 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:52:10.227 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 systemd[1]: Started Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:52:10.227 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:10.226+0000 7f07440c6140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:52:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:52:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: Upgrade: Updating mgr.x 2026-03-10T09:52:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:52:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: Deploying daemon mgr.x on vm03 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[52384]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:52:10.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 
10 09:52:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:09.998Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": dial tcp 192.168.123.103:8443: connect: connection refused" 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: Upgrade: Updating mgr.x 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: Deploying daemon mgr.x on vm03 2026-03-10T09:52:10.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:09 vm00 ceph-mon[56720]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:52:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:10.272+0000 7f07440c6140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:52:11.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:10.717+0000 7f07440c6140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: Failing over to other MGR 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[52384]: Standby manager daemon y started 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ignoring --setuser ceph since I am not root 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ignoring --setgroup ceph since I am not root 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:11.316+0000 7f32249db000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:11.379+0000 7f32249db000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: Failing over to other MGR 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-10T09:52:11.457 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T09:52:11.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:11 vm00 ceph-mon[56720]: Standby manager daemon y started 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: Failing over to other MGR 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:11 vm03 ceph-mon[50536]: Standby manager daemon y started 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.050+0000 7f07440c6140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. 
A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: from numpy import show_config as show_numpy_config 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.143+0000 7f07440c6140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.194+0000 7f07440c6140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:52:11.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.270+0000 7f07440c6140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:52:11.802 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:11.457Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": dial tcp 192.168.123.103:8443: connect: connection refused" 2026-03-10T09:52:12.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.823+0000 7f07440c6140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:52:12.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.949+0000 7f07440c6140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:52:12.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:11.990+0000 7f07440c6140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:52:12.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.030+0000 7f07440c6140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:52:12.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.079+0000 7f07440c6140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:52:12.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:11.801+0000 7f32249db000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:52:12.363 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:12 vm03 ceph-mon[50536]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-10T09:52:12.363 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:12 vm03 ceph-mon[50536]: mgrmap 
e22: x(active, starting, since 1.01266s), standbys: y 2026-03-10T09:52:12.363 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.122+0000 7f07440c6140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:52:12.363 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.300+0000 7f07440c6140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:52:12.363 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.363+0000 7f07440c6140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:52:12.447 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:12 vm00 ceph-mon[52384]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-10T09:52:12.448 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:12 vm00 ceph-mon[52384]: mgrmap e22: x(active, starting, since 1.01266s), standbys: y 2026-03-10T09:52:12.448 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:12.203+0000 7f32249db000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:52:12.448 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:12.379+0000 7f32249db000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:52:12.448 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:12 vm00 ceph-mon[56720]: from='mgr.24302 192.168.123.100:0/1542571217' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-10T09:52:12.448 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:12 vm00 ceph-mon[56720]: mgrmap e22: x(active, starting, since 1.01266s), standbys: y 2026-03-10T09:52:12.614 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.614+0000 7f07440c6140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T09:52:12.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:12.447+0000 7f32249db000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:52:12.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:12.644+0000 7f32249db000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:52:12.929 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.929+0000 7f07440c6140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:52:13.222 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:12.969+0000 7f07440c6140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:52:13.222 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.013+0000 7f07440c6140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:52:13.222 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 
vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.098+0000 7f07440c6140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:52:13.222 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.139+0000 7f07440c6140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:52:13.222 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.222+0000 7f07440c6140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:52:13.487 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.344+0000 7f07440c6140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:52:13.488 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.487+0000 7f07440c6140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:52:13.513 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:13.204Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": dial tcp 192.168.123.100:8443: connect: connection refused" 2026-03-10T09:52:13.513 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:13.315+0000 7f32249db000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.527+0000 7f07440c6140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE Bus STARTING 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: CherryPy Checker: 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: The Application mounted at '' has an empty config. 
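What just happened is cephadm's standard mgr-first ordering: the standby mgr.x was redeployed on the target image ("Deploying daemon mgr.x on vm03"), then the still-active 17.2.0 mgr.y decided "Need to upgrade myself (mgr.y)" and issued "mgr fail y" so the already-upgraded mgr.x could take over and drive the rest of the upgrade. The flood of "Module ... has missing NOTIFY_TYPES member" lines is a per-module warning from the newer mgr as it loads modules that do not declare NOTIFY_TYPES; the daemon still starts and goes active, as the mgrmap entries show. Expressed only as an illustrative CLI equivalent of what the upgrade loop drove internally (not commands the test itself issues):

$ ceph orch daemon redeploy mgr.x quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
$ ceph mgr fail y            # hand the active role to the upgraded standby
$ ceph mgr stat              # confirm mgr.x is now the active mgr
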
2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE Serving on http://:::9283 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE Bus STARTED 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE Bus STOPPING 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE Bus STOPPED 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: Active manager daemon x restarted 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: Activating manager daemon x 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:52:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:13 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: Active manager daemon x restarted 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: Activating manager daemon x 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: from='mgr.? 
192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:52:13.813 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:52:13.813 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:52:13.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": dial tcp 192.168.123.103:8443: connect: connection refused; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T09:52:13.813 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:13.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": dial tcp 192.168.123.100:8443: connect: connection refused" 2026-03-10T09:52:13.813 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:13.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": dial tcp 192.168.123.103:8443: connect: connection refused" 2026-03-10T09:52:13.814 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=error ts=2026-03-10T09:52:13.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.103:8443/api/prometheus_receiver\": dial tcp 192.168.123.103:8443: connect: connection refused; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": dial tcp 192.168.123.100:8443: connect: connection refused" 2026-03-10T09:52:13.814 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": dial tcp 192.168.123.100:8443: connect: connection refused" 2026-03-10T09:52:13.814 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": 
dial tcp 192.168.123.103:8443: connect: connection refused" 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: Active manager daemon x restarted 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: Activating manager daemon x 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:13 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:13.512+0000 7f32249db000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:13.592+0000 7f32249db000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:13.660+0000 7f32249db000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:52:13.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:13.733+0000 7f32249db000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:52:14.078 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:13] ENGINE Bus STARTING 2026-03-10T09:52:14.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:14] ENGINE Serving on http://:::9283 2026-03-10T09:52:14.079 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:14] ENGINE Bus STARTED 2026-03-10T09:52:14.081 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:13.813+0000 7f32249db000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:52:14.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:14.157+0000 7f32249db000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:52:14.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 
2026-03-10T09:52:14.242+0000 7f32249db000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:52:14.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: mgrmap e23: x(active, starting, since 0.0594078s), standbys: y 2026-03-10T09:52:14.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:52:14.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:52:14.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:52:14.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Manager daemon x is now available 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Queued rgw.foo for migration 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Queued rgw.smpl for migration 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Checking for cert/key for grafana.a 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 
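Two distinct failure modes are interleaved in the alertmanager lines above. While a mgr is restarting, its dashboard port 8443 is simply closed ("connection refused"); whenever the dashboard is up, TLS validation fails because its self-signed certificate (the one fetched through the mgr/dashboard/{crt,key} config-keys just dispatched) carries no IP SANs, while alertmanager dials it by IP. Note also the doubled slash in webhook[0]'s URL (8443//api/prometheus_receiver), which suggests that receiver was configured with a trailing slash on the base URL. A diagnostic sketch using stock openssl (the -addext flag needs OpenSSL 1.1.1+; names and paths here are illustrative):

$ echo | openssl s_client -connect 192.168.123.100:8443 2>/dev/null \
    | openssl x509 -noout -text | grep -A1 'Subject Alternative Name'
# a self-signed cert that would validate for an endpoint dialed by IP:
$ openssl req -x509 -nodes -newkey rsa:2048 -days 30 \
    -keyout dashboard.key -out dashboard.crt \
    -subj '/CN=192.168.123.100' -addext 'subjectAltName=IP:192.168.123.100'
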
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: mgrmap e23: x(active, starting, since 0.0594078s), standbys: y
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Manager daemon x is now available
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Queued rgw.foo for migration
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Queued rgw.smpl for migration
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}}
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'}
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Migrating certs/keys for iscsi.foo spec to cert store
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Migrating certs/keys for rgw.foo spec to cert store
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Migrating certs/keys for rgw.smpl spec to cert store
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Checking for cert/key for grafana.a
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:14.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:14.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:14.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:14 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:14.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:14.604Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:52:14.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:14.847Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.103:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.103 because it doesn't contain any IP SANs"
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: mgrmap e23: x(active, starting, since 0.0594078s), standbys: y
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Manager daemon x is now available
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Queued rgw.foo for migration
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Queued rgw.smpl for migration
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}}
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'}
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Migrating certs/keys for iscsi.foo spec to cert store
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Migrating certs/keys for rgw.foo spec to cert store
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Migrating certs/keys for rgw.smpl spec to cert store
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Checking for cert/key for grafana.a
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:14 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:15.263 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:14.943+0000 7f32249db000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:52:15.264 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.045+0000 7f32249db000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:52:15.264 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.124+0000 7f32249db000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:52:15.551 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.263+0000 7f32249db000 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:52:15.551 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.334+0000 7f32249db000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:52:15.551 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.443+0000 7f32249db000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: mgrmap e24: x(active, since 1.11037s), standbys: y
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.550+0000 7f32249db000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: mgrmap e24: x(active, since 1.11037s), standbys: y
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:15.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:15 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: mgrmap e24: x(active, since 1.11037s), standbys: y
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:16.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:15 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.905+0000 7f32249db000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: 2026-03-10T09:52:15.978+0000 7f32249db000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:52:15] ENGINE Bus STARTING
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: CherryPy Checker:
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: The Application mounted at '' has an empty config.
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]:
2026-03-10T09:52:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:52:16] ENGINE Serving on http://:::9283
2026-03-10T09:52:16.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: [10/Mar/2026:09:52:16] ENGINE Bus STARTED
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:15] ENGINE Bus STARTING
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:15] ENGINE Serving on https://192.168.123.103:7150
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:15] ENGINE Client ('192.168.123.103', 59928) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:15] ENGINE Serving on http://192.168.123.103:8765
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:15] ENGINE Bus STARTED
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: Standby manager daemon y restarted
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: Standby manager daemon y started
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: mgrmap e25: x(active, since 2s), standbys: y
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[52384]: Reconfiguring daemon agent.vm00 on vm00
2026-03-10T09:52:16.783 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:16.447Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=5 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:52:16.783 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=warn ts=2026-03-10T09:52:16.618Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs"
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:15] ENGINE Bus STARTING
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:15] ENGINE Serving on https://192.168.123.103:7150
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:15] ENGINE Client ('192.168.123.103', 59928) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:15] ENGINE Serving on http://192.168.123.103:8765
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:15] ENGINE Bus STARTED
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: Standby manager daemon y restarted
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: Standby manager daemon y started
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: mgrmap e25: x(active, since 2s), standbys: y
2026-03-10T09:52:16.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:16 vm00 ceph-mon[56720]: Reconfiguring daemon agent.vm00 on vm00
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:15] ENGINE Bus STARTING
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:15] ENGINE Serving on https://192.168.123.103:7150
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:15] ENGINE Client ('192.168.123.103', 59928) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:15] ENGINE Serving on http://192.168.123.103:8765
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:15] ENGINE Bus STARTED
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: Standby manager daemon y restarted
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: Standby manager daemon y started
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/3015152319' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: mgrmap e25: x(active, since 2s), standbys: y
2026-03-10T09:52:16.959 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:16 vm03 ceph-mon[50536]: Reconfiguring daemon agent.vm00 on vm00
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[52384]: Deploying cephadm binary to vm00
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[52384]: Reconfiguring alertmanager.a (dependencies changed)...
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[52384]: Deploying daemon alertmanager.a on vm00
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[56720]: Deploying cephadm binary to vm00
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[56720]: Reconfiguring alertmanager.a (dependencies changed)...
2026-03-10T09:52:18.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:17 vm00 ceph-mon[56720]: Deploying daemon alertmanager.a on vm00
2026-03-10T09:52:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:17 vm03 ceph-mon[50536]: Deploying cephadm binary to vm00
2026-03-10T09:52:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:17 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:17 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:17 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:17 vm03 ceph-mon[50536]: Reconfiguring alertmanager.a (dependencies changed)...
2026-03-10T09:52:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:17 vm03 ceph-mon[50536]: Deploying daemon alertmanager.a on vm00
2026-03-10T09:52:19.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:18 vm03 ceph-mon[50536]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:19.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:18 vm03 ceph-mon[50536]: mgrmap e26: x(active, since 4s), standbys: y
2026-03-10T09:52:19.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:18 vm00 ceph-mon[52384]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:19.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:18 vm00 ceph-mon[52384]: mgrmap e26: x(active, since 4s), standbys: y
2026-03-10T09:52:19.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:18 vm00 ceph-mon[56720]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:19.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:18 vm00 ceph-mon[56720]: mgrmap e26: x(active, since 4s), standbys: y
2026-03-10T09:52:19.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:19] "GET /metrics HTTP/1.1" 200 34967 "" "Prometheus/2.33.4"
2026-03-10T09:52:20.051 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:19 vm00 ceph-mon[56720]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:52:20.052 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:19 vm00 ceph-mon[52384]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:52:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:19 vm03 ceph-mon[50536]: from='client.14745 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:52:20.346 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 systemd[1]: Stopping Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:20.346 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[74309]: level=info ts=2026-03-10T09:52:20.226Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..."
2026-03-10T09:52:20.346 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[84943]: 2026-03-10 09:52:20.238047342 +0000 UTC m=+0.027654485 container died 276d5952c1656724392f5ba5fd3d904afd453f7beb8ac9f6753903255d8d3248 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:20.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[84943]: 2026-03-10 09:52:20.25251868 +0000 UTC m=+0.042125823 container remove 276d5952c1656724392f5ba5fd3d904afd453f7beb8ac9f6753903255d8d3248 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:20.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[84943]: 2026-03-10 09:52:20.253486441 +0000 UTC m=+0.043093584 volume remove adfa6748053afa4cad34c7c0259fc532b1f1cc4d04d707bf94b226bdf4cb2f1f
2026-03-10T09:52:20.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 bash[84943]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a
2026-03-10T09:52:20.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@alertmanager.a.service: Deactivated successfully.
2026-03-10T09:52:20.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 systemd[1]: Stopped Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:20.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 systemd[1]: Starting Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:20.936 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[85054]: 2026-03-10 09:52:20.639193559 +0000 UTC m=+0.019632279 volume create 2eccfcbe1cbef02e48c98b04f8824430f105fcbc56d683430cadf6bca73986f7
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[85054]: 2026-03-10 09:52:20.644235989 +0000 UTC m=+0.024674709 container create 44fbe9462c876e42b51d3ab0b10ac0c533098adf52eea628e6e116b60cbc65e9 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[85054]: 2026-03-10 09:52:20.675913433 +0000 UTC m=+0.056352163 container init 44fbe9462c876e42b51d3ab0b10ac0c533098adf52eea628e6e116b60cbc65e9 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[85054]: 2026-03-10 09:52:20.678580785 +0000 UTC m=+0.059019505 container start 44fbe9462c876e42b51d3ab0b10ac0c533098adf52eea628e6e116b60cbc65e9 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 bash[85054]: 44fbe9462c876e42b51d3ab0b10ac0c533098adf52eea628e6e116b60cbc65e9
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 podman[85054]: 2026-03-10 09:52:20.631508845 +0000 UTC m=+0.011947574 image pull c8568f914cd25b2062c44e9f79f9c18da6e3b85fe0c47a12a2191c61426c2b19 quay.io/prometheus/alertmanager:v0.25.0
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 systemd[1]: Started Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.697Z caller=main.go:240 level=info msg="Starting Alertmanager" version="(version=0.25.0, branch=HEAD, revision=258fab7cdd551f2cf251ed0348f0ad7289aee789)"
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.697Z caller=main.go:241 level=info build_context="(go=go1.19.4, user=root@abe866dd5717, date=20221222-14:51:36)"
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.702Z caller=cluster.go:185 level=info component=cluster msg="setting advertise address explicitly" addr=192.168.123.100 port=9094
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.704Z caller=cluster.go:681 level=info component=cluster msg="Waiting for gossip to settle..." interval=2s
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.751Z caller=coordinator.go:113 level=info component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.751Z caller=coordinator.go:126 level=info component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.755Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9093
2026-03-10T09:52:20.937 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:20.755Z caller=tls_config.go:235 level=info msg="TLS is disabled." http2=false address=[::]:9093
2026-03-10T09:52:21.193 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-mon[52384]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:21.193 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:21.193 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:20 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:21.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:20 vm00 ceph-mon[56720]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:21.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:20 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:21.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:20 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:21.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:20 vm03 ceph-mon[50536]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:21.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:20 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:21.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:20 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:21.517 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 systemd[1]: Stopping Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:21.796 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 podman[85265]: 2026-03-10 09:52:21.517004482 +0000 UTC m=+0.027885187 container died dfbf18fec33b9f58e77312ca3031790315aae980c990470757b2b34365c1e890 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:21.797 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 podman[85265]: 2026-03-10 09:52:21.536282437 +0000 UTC m=+0.047163142 container remove dfbf18fec33b9f58e77312ca3031790315aae980c990470757b2b34365c1e890 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:21.797 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 bash[85265]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a
2026-03-10T09:52:21.797 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.a.service: Main process exited, code=exited, status=143/n/a
2026-03-10T09:52:21.797 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.a.service: Failed with result 'exit-code'.
2026-03-10T09:52:21.797 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 systemd[1]: Stopped Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:21.797 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 systemd[1]: Starting Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:22 vm03 ceph-mon[50536]: Reconfiguring node-exporter.a (dependencies changed)...
2026-03-10T09:52:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:22 vm03 ceph-mon[50536]: Deploying daemon node-exporter.a on vm00
2026-03-10T09:52:22.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:22 vm00 ceph-mon[52384]: Reconfiguring node-exporter.a (dependencies changed)...
2026-03-10T09:52:22.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:22 vm00 ceph-mon[52384]: Deploying daemon node-exporter.a on vm00
2026-03-10T09:52:22.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:22 vm00 ceph-mon[56720]: Reconfiguring node-exporter.a (dependencies changed)...
2026-03-10T09:52:22.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:22 vm00 ceph-mon[56720]: Deploying daemon node-exporter.a on vm00
2026-03-10T09:52:22.120 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:21 vm00 bash[85375]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0...
2026-03-10T09:52:23.010 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:22.704Z caller=cluster.go:706 level=info component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000810397s
2026-03-10T09:52:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:23 vm03 ceph-mon[50536]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:52:23.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:23] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T09:52:23.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:23 vm00 ceph-mon[56720]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:52:23.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-mon[52384]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:52:23.369 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: Getting image source signatures
2026-03-10T09:52:23.369 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24
2026-03-10T09:52:23.369 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510
2026-03-10T09:52:23.369 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: Writing manifest to image destination
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 podman[85375]: 2026-03-10 09:52:23.86919919 +0000 UTC m=+1.992734039 container create b0bf12b366fd754a8e14dcf8e2a90cd2a4b4abafcd18ef40a9f7f27f380e1545 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 podman[85375]: 2026-03-10 09:52:23.86002608 +0000 UTC m=+1.983560929 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 podman[85375]: 2026-03-10 09:52:23.896901484 +0000 UTC m=+2.020436333 container init b0bf12b366fd754a8e14dcf8e2a90cd2a4b4abafcd18ef40a9f7f27f380e1545 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 podman[85375]: 2026-03-10 09:52:23.899221708 +0000 UTC m=+2.022756557 container start b0bf12b366fd754a8e14dcf8e2a90cd2a4b4abafcd18ef40a9f7f27f380e1545 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.902Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)"
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.902Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)"
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.902Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.902Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.902Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.902Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 bash[85375]: b0bf12b366fd754a8e14dcf8e2a90cd2a4b4abafcd18ef40a9f7f27f380e1545
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:110 level=info msg="Enabled collectors"
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=arp
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=bcache
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=bonding
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=btrfs
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=conntrack
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=cpu
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=cpufreq
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=diskstats
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=dmi
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=edac
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 systemd[1]: Started Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=entropy
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=fibrechannel
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=filefd
2026-03-10T09:52:24.121 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=filesystem
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=hwmon
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=infiniband
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=ipvs
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=loadavg
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=mdadm
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=meminfo
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=netclass
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=netdev
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=netstat
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=nfs
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=nfsd
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=nvme
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=os
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=powersupplyclass
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=pressure
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=rapl
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=schedstat
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=selinux
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=sockstat
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=softnet
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=stat
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=tapestats
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=textfile
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=thermal_zone
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=time
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=udp_queues
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=uname
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=vmstat
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=xfs
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=node_exporter.go:117 level=info collector=zfs
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100
2026-03-10T09:52:24.122 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 09:52:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a[85429]: ts=2026-03-10T09:52:23.903Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100
2026-03-10T09:52:25.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:25.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:24 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:25.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:25.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:24 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:26.241 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:25 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2687485654' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:52:26.241 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:25 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4036356322' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3630440441"}]: dispatch
2026-03-10T09:52:26.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:25 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2687485654' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:52:26.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:25 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4036356322' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3630440441"}]: dispatch
2026-03-10T09:52:26.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:25 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2687485654' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:52:26.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:25 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4036356322' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3630440441"}]: dispatch
2026-03-10T09:52:26.510 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 systemd[1]: Stopping Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 podman[73607]: 2026-03-10 09:52:26.597411707 +0000 UTC m=+0.033846266 container died e8ec5e902bbcbb6a4fd1de239a49dff725ab197735bc19bffeb99e4b04825c5b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 podman[73607]: 2026-03-10 09:52:26.625739671 +0000 UTC m=+0.062174220 container remove e8ec5e902bbcbb6a4fd1de239a49dff725ab197735bc19bffeb99e4b04825c5b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 bash[73607]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service: Main process exited, code=exited, status=143/n/a
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service: Failed with result 'exit-code'.
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 systemd[1]: Stopped Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:26.798 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service: Consumed 1.106s CPU time.
2026-03-10T09:52:27.097 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:26 vm03 systemd[1]: Starting Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:27.097 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:27 vm03 bash[73800]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0...
2026-03-10T09:52:27.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: Reconfiguring daemon agent.vm03 on vm03
2026-03-10T09:52:27.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: Deploying cephadm binary to vm03
2026-03-10T09:52:27.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-10T09:52:27.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4036356322' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3630440441"}]': finished
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: osdmap e79: 8 total, 8 up, 8 in
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: Reconfiguring node-exporter.b (dependencies changed)...
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: Deploying daemon node-exporter.b on vm03
2026-03-10T09:52:27.098 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:26 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3516387722' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/394930303"}]: dispatch
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: Reconfiguring daemon agent.vm03 on vm03
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: Deploying cephadm binary to vm03
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4036356322' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3630440441"}]': finished
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: osdmap e79: 8 total, 8 up, 8 in
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: Reconfiguring node-exporter.b (dependencies changed)...
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: Deploying daemon node-exporter.b on vm03
2026-03-10T09:52:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3516387722' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/394930303"}]: dispatch
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: Reconfiguring daemon agent.vm03 on vm03
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: Deploying cephadm binary to vm03
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4036356322' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3630440441"}]': finished
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: osdmap e79: 8 total, 8 up, 8 in
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: Reconfiguring node-exporter.b (dependencies changed)...
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: Deploying daemon node-exporter.b on vm03
2026-03-10T09:52:27.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:26 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3516387722' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/394930303"}]: dispatch
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3516387722' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/394930303"}]': finished
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[52384]: osdmap e80: 8 total, 8 up, 8 in
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/862838901' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]: dispatch
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]: dispatch
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3516387722' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/394930303"}]': finished
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[56720]: osdmap e80: 8 total, 8 up, 8 in
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/862838901' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]: dispatch
2026-03-10T09:52:28.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:28 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]: dispatch
2026-03-10T09:52:28.548 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:28 vm03 bash[73800]: Getting image source signatures
2026-03-10T09:52:28.548 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:28 vm03 bash[73800]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24
2026-03-10T09:52:28.548 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:28 vm03 bash[73800]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510
2026-03-10T09:52:28.548 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:28 vm03 bash[73800]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a
2026-03-10T09:52:28.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:28 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3516387722' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/394930303"}]': finished
2026-03-10T09:52:28.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:28 vm03 ceph-mon[50536]: osdmap e80: 8 total, 8 up, 8 in
2026-03-10T09:52:28.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:28 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/862838901' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]: dispatch
2026-03-10T09:52:28.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:28 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]: dispatch
2026-03-10T09:52:29.295 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-mon[50536]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s
2026-03-10T09:52:29.295 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]': finished
2026-03-10T09:52:29.295 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-mon[50536]: osdmap e81: 8 total, 8 up, 8 in
2026-03-10T09:52:29.295 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1342384718' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/135643742"}]: dispatch
2026-03-10T09:52:29.295 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 bash[73800]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 bash[73800]: Writing manifest to image destination
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 podman[73800]: 2026-03-10 09:52:29.051781399 +0000 UTC m=+2.015139153 container create 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 podman[73800]: 2026-03-10 09:52:29.040808287 +0000 UTC m=+2.004166060 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 podman[73800]: 2026-03-10 09:52:29.104903391 +0000 UTC m=+2.068261154 container init 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 podman[73800]: 2026-03-10 09:52:29.107452023 +0000 UTC m=+2.070809786 container start 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 bash[73800]: 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.125Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)"
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.125Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)"
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 systemd[1]: Started Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.129Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.129Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.129Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.129Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=arp 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info 
collector=cpufreq 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=edac 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.130Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-10T09:52:29.295 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=os 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.131Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=stat 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info 
collector=tapestats 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=time 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=uname 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.132Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-10T09:52:29.296 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b[74092]: ts=2026-03-10T09:52:29.133Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[52384]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]': finished 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[52384]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/1342384718' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/135643742"}]: dispatch 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[56720]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/933298556"}]': finished 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[56720]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T09:52:29.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1342384718' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/135643742"}]: dispatch 2026-03-10T09:52:29.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:29 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:52:29.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:29] "GET /metrics HTTP/1.1" 200 37769 "" "Prometheus/2.33.4" 2026-03-10T09:52:30.190 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1342384718' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/135643742"}]': finished 2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[52384]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' 2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4236345289' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/898935092"}]: dispatch 2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/1342384718' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/135643742"}]': finished
2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[56720]: osdmap e82: 8 total, 8 up, 8 in
2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:30 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4236345289' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/898935092"}]: dispatch
2026-03-10T09:52:30.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:30 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1342384718' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/135643742"}]': finished
2026-03-10T09:52:30.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:30 vm03 ceph-mon[50536]: osdmap e82: 8 total, 8 up, 8 in
2026-03-10T09:52:30.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:30 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:30.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:30 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:30.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:30 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4236345289' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/898935092"}]: dispatch
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (3m) 34s ago 3m 49.1M - 8.3.5 dad864ee21e9 738bbf68f8ee
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (6m) 31s ago 6m 475M - 17.2.0 e1d6a67b021e 1120e4152adf
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (6m) 31s ago 6m 47.7M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (5m) 34s ago 5m 40.3M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (5m) 31s ago 5m 43.2M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 starting - - - -
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (5m) 31s ago 5m 53.5M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (5m) 31s ago 5m 52.0M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (5m) 31s ago 5m 47.2M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (5m) 31s ago 5m 49.2M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (4m) 34s ago 4m 51.4M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (4m) 34s ago 4m 50.4M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (4m) 34s ago 4m 48.1M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (4m) 34s ago 4m 49.3M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (3m) 34s ago 3m 58.1M - 514e6a882f6e 650ef4d78b8a
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (3m) 31s ago 3m 90.6M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (3m) 34s ago 3m 91.8M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:52:30.630 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (3m) 31s ago 3m 91.2M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:52:30.631 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (3m) 34s ago 3m 88.7M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:52:30.898 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:52:30.898 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:52:30.898 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:52:30.898 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:52:30.898 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1,
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 16,
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:52:30.899 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:52:30.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:30.708Z caller=cluster.go:698 level=info component=cluster msg="gossip settled; proceeding" elapsed=10.004672745s
2026-03-10T09:52:30.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:30.710Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:52:30.899 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:30.710Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [],
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "",
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "message": "",
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:52:31.120 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: Deploying daemon prometheus.a on vm03
2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4236345289' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/898935092"}]': finished
2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: osdmap e83: 8 total, 8 up, 8 in
2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/4278937982' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/99749656"}]: dispatch 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4253603330' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: Deploying daemon prometheus.a on vm03 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4236345289' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/898935092"}]': finished 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4278937982' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/99749656"}]: dispatch 2026-03-10T09:52:31.196 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:31 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4253603330' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:52:31.394 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T09:52:31.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T09:52:31.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: Deploying daemon prometheus.a on vm03 2026-03-10T09:52:31.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:52:31.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4236345289' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/898935092"}]': finished 2026-03-10T09:52:31.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T09:52:31.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4278937982' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/99749656"}]: dispatch 2026-03-10T09:52:31.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:31 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/4253603330' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.15018 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.24910 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.24916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.24925 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4278937982' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/99749656"}]': finished 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1729396827' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:32 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/560866689' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/99749656"}]: dispatch 2026-03-10T09:52:32.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:32.316Z caller=manager.go:609 level=warn component="rule manager" group=pools msg="Evaluating rule failed" rule="alert: CephPoolGrowthWarning\nexpr: (predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right()\n ceph_pool_metadata) >= 95\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.9.2\n severity: warning\n type: ceph_default\nannotations:\n description: |\n Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.\n summary: Pool growth rate may soon exceed it's capacity\n" err="found duplicate series for the match group {pool_id=\"1\"} on the left hand-side of the operation: [{instance=\"192.168.123.103:9283\", job=\"ceph\", pool_id=\"1\"}, {instance=\"192.168.123.100:9283\", job=\"ceph\", pool_id=\"1\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.15018 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.24910 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.24916 -' entity='client.admin' 
cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.24925 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4278937982' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/99749656"}]': finished 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1729396827' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/560866689' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/99749656"}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.15018 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.24910 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.24916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.24925 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4278937982' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/99749656"}]': finished 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1729396827' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:52:32.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:32 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/560866689' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/99749656"}]: dispatch 2026-03-10T09:52:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-mon[52384]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:52:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/560866689' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/99749656"}]': finished 2026-03-10T09:52:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-mon[52384]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T09:52:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:33 vm00 ceph-mon[56720]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:52:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:33 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/560866689' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/99749656"}]': finished 2026-03-10T09:52:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:33 vm00 ceph-mon[56720]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T09:52:33.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[52592]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:33] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T09:52:33.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:33 vm03 ceph-mon[50536]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:52:33.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:33 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/560866689' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/99749656"}]': finished 2026-03-10T09:52:33.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:33 vm03 ceph-mon[50536]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T09:52:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:33.505Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 4 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:33.505Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 5 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:33.505Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:33.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:33.506Z caller=notify.go:732 level=warn 
component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:34.580 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:34 vm03 ceph-mon[50536]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:52:34.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:34 vm00 ceph-mon[52384]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:52:34.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:34 vm00 ceph-mon[56720]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:34 vm03 systemd[1]: Stopping Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.055Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.055Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.055Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.055Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.055Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.055Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.056Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.056Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.057Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.058Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.058Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[65209]: ts=2026-03-10T09:52:35.058Z caller=main.go:1066 level=info msg="See you next time!" 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 podman[74514]: 2026-03-10 09:52:35.068169337 +0000 UTC m=+0.031983158 container died 650ef4d78b8af2da3f02182c78e5eb67f20fac4d43d18abba9b2fd177fbe44ae (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 podman[74514]: 2026-03-10 09:52:35.086811318 +0000 UTC m=+0.050625119 container remove 650ef4d78b8af2da3f02182c78e5eb67f20fac4d43d18abba9b2fd177fbe44ae (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:52:35.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 bash[74514]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a 2026-03-10T09:52:35.470 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service: Deactivated successfully. 2026-03-10T09:52:35.470 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 systemd[1]: Stopped Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:52:35.470 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 systemd[1]: Starting Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
2026-03-10T09:52:35.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:35 vm03 ceph-mon[50536]: from='client.24868 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 podman[74624]: 2026-03-10 09:52:35.470004439 +0000 UTC m=+0.022186988 container create f37ff7258e9a90860e6c8a0f3e01f62c845e8691f7e5be73d5b444145b0ebe5b (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 podman[74624]: 2026-03-10 09:52:35.504810161 +0000 UTC m=+0.056992710 container init f37ff7258e9a90860e6c8a0f3e01f62c845e8691f7e5be73d5b444145b0ebe5b (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 podman[74624]: 2026-03-10 09:52:35.507748281 +0000 UTC m=+0.059930830 container start f37ff7258e9a90860e6c8a0f3e01f62c845e8691f7e5be73d5b444145b0ebe5b (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 bash[74624]: f37ff7258e9a90860e6c8a0f3e01f62c845e8691f7e5be73d5b444145b0ebe5b 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 podman[74624]: 2026-03-10 09:52:35.460526985 +0000 UTC m=+0.012709545 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 systemd[1]: Started Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.531Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.531Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.531Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm03 (none))" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.532Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.532Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.535Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.541Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.542Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.542Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.543Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.543Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.093µs 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.543Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.561Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=1 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.561Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=1 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.561Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=16.12µs wal_replay_duration=18.33208ms wbl_replay_duration=201ns total_replay_duration=18.3691ms 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.568Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.568Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.568Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.583Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=14.52888ms db_storage=1.433µs remote_storage=1.182µs web_handler=642ns query_engine=1.263µs scrape=716.442µs scrape_sd=238.185µs notify=9.046µs notify_sd=7.113µs rules=13.189713ms tracing=6.192µs 2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.583Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 
2026-03-10T09:52:35.799 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:35 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:35.583Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T09:52:35.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:35 vm00 ceph-mon[52384]: from='client.24868 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:52:35.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:35 vm00 ceph-mon[56720]: from='client.24868 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:52:36.152 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 systemd[1]: Stopping Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[66055]: t=2026-03-10T09:52:36+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 podman[74763]: 2026-03-10 09:52:36.162309672 +0000 UTC m=+0.033485561 container died 738bbf68f8eed9c4cabf15e79c12cfd8cde267dbf7502496a8cf38a6f2322b3e (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, release=236.1648460182, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, maintainer=Paul Cuzner , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi8, summary=Grafana Container configured for Ceph mgr/dashboard integration, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-type=git, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.k8s.display-name=Red Hat Universal Base Image 8, build-date=2022-03-28T10:36:18.413762, description=Ceph Grafana Container, io.openshift.tags=base rhel8, version=8.5, vendor=Red Hat, Inc., io.buildah.version=1.24.2, io.openshift.expose-services=) 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 podman[74763]: 2026-03-10 09:52:36.179870778 +0000 UTC m=+0.051046667 container remove 738bbf68f8eed9c4cabf15e79c12cfd8cde267dbf7502496a8cf38a6f2322b3e (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, name=ubi8, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.24.2, description=Ceph Grafana Container, io.openshift.tags=base rhel8, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.component=ubi8-container, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, architecture=x86_64, build-date=2022-03-28T10:36:18.413762, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Grafana Container configured for Ceph mgr/dashboard integration, version=8.5, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, release=236.1648460182) 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 bash[74763]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 bash[74781]: Error: no container with name or ID "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana.a" found: no such container 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@grafana.a.service: Deactivated successfully. 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 systemd[1]: Stopped Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@grafana.a.service: Consumed 1.157s CPU time. 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 systemd[1]: Starting Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 podman[74821]: 2026-03-10 09:52:36.334772516 +0000 UTC m=+0.023139631 container create 011f2081bf92adf5420f7cf8779c02748b19d9c3b3dc6912550d6c6cdd54fcac (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.openshift.tags=base rhel8, description=Ceph Grafana Container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, version=8.5, maintainer=Paul Cuzner , architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.component=ubi8-container, name=ubi8, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, build-date=2022-03-28T10:36:18.413762, io.buildah.version=1.24.2, vendor=Red Hat, Inc., release=236.1648460182, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, summary=Grafana Container configured for Ceph mgr/dashboard integration) 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 podman[74821]: 2026-03-10 09:52:36.367867705 +0000 UTC m=+0.056234829 container init 011f2081bf92adf5420f7cf8779c02748b19d9c3b3dc6912550d6c6cdd54fcac (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, io.openshift.expose-services=, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, summary=Grafana Container configured for Ceph mgr/dashboard integration, vendor=Red Hat, Inc., com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=Ceph Grafana Container, name=ubi8, version=8.5, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, build-date=2022-03-28T10:36:18.413762, io.buildah.version=1.24.2, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, release=236.1648460182, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base rhel8, maintainer=Paul Cuzner , architecture=x86_64, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56) 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 podman[74821]: 2026-03-10 09:52:36.370796167 +0000 UTC m=+0.059163282 container start 011f2081bf92adf5420f7cf8779c02748b19d9c3b3dc6912550d6c6cdd54fcac (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, architecture=x86_64, vendor=Red Hat, Inc., distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base rhel8, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, name=ubi8, version=8.5, build-date=2022-03-28T10:36:18.413762, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, description=Ceph Grafana Container, io.openshift.expose-services=, maintainer=Paul Cuzner , summary=Grafana Container configured for Ceph mgr/dashboard integration, io.buildah.version=1.24.2) 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 bash[74821]: 011f2081bf92adf5420f7cf8779c02748b19d9c3b3dc6912550d6c6cdd54fcac 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 podman[74821]: 2026-03-10 09:52:36.323677334 +0000 UTC m=+0.012044459 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-10T09:52:36.418 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 systemd[1]: Started Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
logger=settings 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="App mode production" logger=settings 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: 
t=2026-03-10T09:52:36+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="migrations completed" logger=migrator performed=0 skipped=377 duration=317.815µs 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="deleted datasource based on configuration" logger=provisioning.datasources name=Dashboard1 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Loki uid=P8E80F9AEF21F6940 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="warming cache for startup" logger=ngalert 
2026-03-10T09:52:36.798 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T09:52:36+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager
2026-03-10T09:52:36.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE Bus STOPPING
2026-03-10T09:52:36.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE Bus STOPPED
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE Bus STARTING
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE Serving on http://:::9283
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE Bus STARTED
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:36] ENGINE Bus STOPPING
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: Reconfiguring grafana.a (dependencies changed)...
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: Reconfiguring daemon grafana.a on vm03
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:36 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: Reconfiguring grafana.a (dependencies changed)...
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: Reconfiguring daemon grafana.a on vm03
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:52:36.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: Reconfiguring grafana.a (dependencies changed)...
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: Reconfiguring daemon grafana.a on vm03
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:36.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:36 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:37.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:52:37.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STOPPED
2026-03-10T09:52:37.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STARTING
2026-03-10T09:52:37.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Serving on http://:::9283
2026-03-10T09:52:37.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STARTED
2026-03-10T09:52:37.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STOPPING
2026-03-10T09:52:37.849 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 systemd[1]: Stopping Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:37.849 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 podman[86340]: 2026-03-10 09:52:37.736281367 +0000 UTC m=+0.069046134 container died 1120e4152adf5f948cbbb635e15a4a06115b42261f4e1586222cb16cd6da152d (image=quay.io/ceph/ceph:v17.2.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, com.redhat.component=centos-stream-container, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, GIT_BRANCH=HEAD, io.buildah.version=1.19.8, architecture=x86_64, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, vendor=Red Hat, Inc., GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com)
2026-03-10T09:52:37.849 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 podman[86340]: 2026-03-10 09:52:37.770918662 +0000 UTC m=+0.103683429 container remove 1120e4152adf5f948cbbb635e15a4a06115b42261f4e1586222cb16cd6da152d (image=quay.io/ceph/ceph:v17.2.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, ceph=True, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, vcs-type=git, version=8, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, name=centos-stream, io.openshift.expose-services=, GIT_CLEAN=True, release=754, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8)
2026-03-10T09:52:37.849 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 bash[86340]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y
2026-03-10T09:52:37.849 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Main process exited, code=exited, status=143/n/a
2026-03-10T09:52:38.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:52:38.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STOPPED
2026-03-10T09:52:38.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STARTING
2026-03-10T09:52:38.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Serving on http://:::9283
2026-03-10T09:52:38.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:37 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:37] ENGINE Bus STARTED
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: Upgrade: Updating mgr.y
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[52384]: Deploying daemon mgr.y on vm00
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: Upgrade: Updating mgr.y
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:38 vm00 ceph-mon[56720]: Deploying daemon mgr.y on vm00
2026-03-10T09:52:38.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Failed with result 'exit-code'.
2026-03-10T09:52:38.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 systemd[1]: Stopped Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:38.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:37 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Consumed 36.264s CPU time.
2026-03-10T09:52:38.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 systemd[1]: Starting Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 podman[86522]: 2026-03-10 09:52:38.176762341 +0000 UTC m=+0.025000737 container create 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 podman[86522]: 2026-03-10 09:52:38.220936649 +0000 UTC m=+0.069175045 container init 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3)
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 podman[86522]: 2026-03-10 09:52:38.224989094 +0000 UTC m=+0.073227490 container start 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 bash[86522]: 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 podman[86522]: 2026-03-10 09:52:38.167222175 +0000 UTC m=+0.015460580 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 systemd[1]: Started Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:38.407 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:38.378+0000 7fd087d2b140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: Upgrade: Updating mgr.y
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:38 vm03 ceph-mon[50536]: Deploying daemon mgr.y on vm00
2026-03-10T09:52:38.816 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:38 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:38.452+0000 7fd087d2b140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 687 B/s rd, 0 op/s
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:39.278+0000 7fd087d2b140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 687 B/s rd, 0 op/s
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:39.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:39 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 687 B/s rd, 0 op/s
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:39.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:39 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:40.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:39.764+0000 7fd087d2b140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:52:40.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:52:40.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-10T09:52:40.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: from numpy import show_config as show_numpy_config
2026-03-10T09:52:40.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:39.913+0000 7fd087d2b140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:52:40.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:39.974+0000 7fd087d2b140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:52:40.286 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:40.079+0000 7fd087d2b140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: Deploying daemon agent.vm00 on vm00
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 606 B/s rd, 0 op/s
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: Failing over to other MGR
2026-03-10T09:52:40.536 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: osdmap e86: 8 total, 8 up, 8 in
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: Deploying daemon agent.vm00 on vm00
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 606 B/s rd, 0 op/s
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: Failing over to other MGR
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: osdmap e86: 8 total, 8 up, 8 in
2026-03-10T09:52:40.537 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:40 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: Deploying daemon agent.vm00 on vm00
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 606 B/s rd, 0 op/s
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: Failing over to other MGR
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 192.168.123.103:0/262432852' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: osdmap e86: 8 total, 8 up, 8 in
2026-03-10T09:52:40.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:40 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x'
2026-03-10T09:52:41.172 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:40.924+0000 7fd087d2b140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:52:41.172 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.104+0000 7fd087d2b140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:52:41.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:40 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ignoring --setuser ceph since I am not root
2026-03-10T09:52:41.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:40 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ignoring --setgroup ceph since I am not root
2026-03-10T09:52:41.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:41.028+0000 7f3c5a99a140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:52:41.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:41.085+0000 7f3c5a99a140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:52:41.465 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.169+0000 7fd087d2b140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:52:41.465 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.228+0000 7fd087d2b140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:52:41.465 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.302+0000 7fd087d2b140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:52:41.465 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.372+0000 7fd087d2b140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:52:41.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:41.538+0000 7f3c5a99a140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:52:41.833 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.625+0000 7fd087d2b140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:52:41.833 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:41.695+0000 7fd087d2b140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:52:42.113 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:41.894+0000 7f3c5a99a140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: from numpy import show_config as show_numpy_config
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:41 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:41.989+0000 7f3c5a99a140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.028+0000 7f3c5a99a140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:41 vm03 ceph-mon[50536]: from='mgr.24740 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished
2026-03-10T09:52:42.114 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:41 vm03 ceph-mon[50536]: mgrmap e27: y(active, starting, since 0.907501s)
2026-03-10T09:52:42.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:41 vm00 ceph-mon[52384]: from='mgr.24740 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished
2026-03-10T09:52:42.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:41 vm00 ceph-mon[52384]: mgrmap e27: y(active, starting, since 0.907501s)
2026-03-10T09:52:42.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:41 vm00 ceph-mon[56720]: from='mgr.24740 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished
2026-03-10T09:52:42.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:41 vm00 ceph-mon[56720]: mgrmap e27: y(active, starting, since 0.907501s)
2026-03-10T09:52:42.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.008+0000 7fd087d2b140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-10T09:52:42.392 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.344+0000 7fd087d2b140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:52:42.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.113+0000 7f3c5a99a140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:52:42.674 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.391+0000 7fd087d2b140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:52:42.674 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.447+0000 7fd087d2b140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:52:42.674 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.540+0000 7fd087d2b140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:52:42.674 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.583+0000 7fd087d2b140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:52:42.935 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.669+0000 7f3c5a99a140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:52:42.935 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.801+0000 7f3c5a99a140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:52:42.935 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.846+0000 7f3c5a99a140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:52:42.935 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.886+0000 7f3c5a99a140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:52:42.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.673+0000 7fd087d2b140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:52:42.954 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.801+0000 7fd087d2b140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.935+0000 7f3c5a99a140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:42 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:42.978+0000 7f3c5a99a140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.176+0000 7f3c5a99a140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: Active manager daemon y restarted
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: Activating manager daemon y
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: osdmap e87: 8 total, 8 up, 8 in
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:43.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:43 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: Active manager daemon y restarted
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: Activating manager daemon y
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: osdmap e87: 8 total, 8 up, 8 in
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: Active manager daemon y restarted
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: Activating manager daemon y
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: osdmap e87: 8 total, 8 up, 8 in
2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: from='mgr.?
192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:43 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.953+0000 7fd087d2b140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:42.992+0000 7fd087d2b140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Bus STARTING 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: CherryPy Checker: 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: The Application mounted at '' has an empty config. 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Serving on http://:::9283 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Bus STARTED 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Bus STOPPING 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T09:52:43.237 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Bus STOPPED 2026-03-10T09:52:43.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Bus STARTING 2026-03-10T09:52:43.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Serving on http://:::9283 2026-03-10T09:52:43.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:43] ENGINE Bus STARTED 2026-03-10T09:52:43.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.232+0000 7f3c5a99a140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:52:43.548 
INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.464+0000 7f3c5a99a140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T09:52:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:43.505Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:43.505Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:43.506Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:43.506Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:52:44.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.771+0000 7f3c5a99a140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:52:44.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.809+0000 7f3c5a99a140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:52:44.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.853+0000 7f3c5a99a140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:52:44.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.931+0000 7f3c5a99a140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:52:44.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:43 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:43.968+0000 7f3c5a99a140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:44.047+0000 7f3c5a99a140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:44.173+0000 7f3c5a99a140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: mgrmap e28: y(active, starting, since 0.0598226s) 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 
2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:44.323 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: Manager daemon y is now available
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:44.324 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:44 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:44.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: mgrmap e28: y(active, starting, since 0.0598226s)
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: Manager daemon y is now available
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: mgrmap e28: y(active, starting, since 0.0598226s)
2026-03-10T09:52:44.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: Manager daemon y is now available
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:44 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:44.323+0000 7f3c5a99a140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:44.367+0000 7f3c5a99a140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:44] ENGINE Bus STARTING
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: CherryPy Checker:
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: The Application mounted at '' has an empty config.
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]:
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:44] ENGINE Serving on http://:::9283
2026-03-10T09:52:44.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:44] ENGINE Bus STARTED
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: mgrmap e29: y(active, since 1.08171s)
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: Standby manager daemon x started
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:45.097 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:45 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:45.373 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: mgrmap e29: y(active, since 1.08171s)
2026-03-10T09:52:45.373 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:45.373 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: Standby manager daemon x started
2026-03-10T09:52:45.373 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: mgrmap e29: y(active, since 1.08171s)
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: Standby manager daemon x started
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:45.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:45 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:44] ENGINE Bus STARTING
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:44] ENGINE Serving on https://192.168.123.100:7151
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:44] ENGINE Client ('192.168.123.100', 39282) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:44] ENGINE Serving on http://192.168.123.100:8765
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:44] ENGINE Bus STARTED
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='client.24868 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:52:46.359 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: mgrmap e30: y(active, since 2s), standbys: x
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:46.360 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:46 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:44] ENGINE Bus STARTING
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:44] ENGINE Serving on https://192.168.123.100:7151
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:44] ENGINE Client ('192.168.123.100', 39282) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:44] ENGINE Serving on http://192.168.123.100:8765
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:44] ENGINE Bus STARTED
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='client.24868 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: mgrmap e30: y(active, since 2s), standbys: x
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:44] ENGINE Bus STARTING
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:44] ENGINE Serving on https://192.168.123.100:7151
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:44] ENGINE Client ('192.168.123.100', 39282) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:44] ENGINE Serving on http://192.168.123.100:8765
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:44] ENGINE Bus STARTED
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='client.24868 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: mgrmap e30: y(active, since 2s), standbys: x
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:46 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:47.271 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:47 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:47.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:47 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: mgrmap e31: y(active, since 4s), standbys: x
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:48 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:48.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: mgrmap e31: y(active, since 4s), standbys: x
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: mgrmap e31: y(active, since 4s), standbys: x
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:48.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:48 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:49.194 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:48 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:52:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.51.0"
2026-03-10T09:52:49.194 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:49 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3238794681' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:52:49.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:49 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3238794681' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:52:49.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:49 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3238794681' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[52384]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[52384]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[56720]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[56720]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:50.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:50 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:50.346 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:50 vm03 ceph-mon[50536]: Deploying daemon agent.vm03 on vm03
2026-03-10T09:52:50.346 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:50 vm03 ceph-mon[50536]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:50.346 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:50 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:50.346 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:50 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 systemd[1]: Stopping Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..."
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..."
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..."
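The "Deploying daemon agent.vm03" entries are the cephadm mgr module pushing its per-host agent out to vm03 (this run has the agent enabled). A quick way to confirm the agents landed on every host, assuming an admin keyring is available on the node:

    ceph orch ps --daemon-type agent
    ceph config get mgr mgr/cephadm/use_agent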
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:984 level=info msg="Scrape discovery manager stopped"
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:998 level=info msg="Notify discovery manager stopped"
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped"
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:1039 level=info msg="Stopping scrape manager..."
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.473Z caller=main.go:1031 level=info msg="Scrape manager stopped"
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.479Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..."
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.479Z caller=main.go:1261 level=info msg="Notifier manager stopped"
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[74634]: ts=2026-03-10T09:52:50.479Z caller=main.go:1273 level=info msg="See you next time!"
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 podman[77130]: 2026-03-10 09:52:50.48885258 +0000 UTC m=+0.033626214 container died f37ff7258e9a90860e6c8a0f3e01f62c845e8691f7e5be73d5b444145b0ebe5b (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 podman[77130]: 2026-03-10 09:52:50.5153999 +0000 UTC m=+0.060173534 container remove f37ff7258e9a90860e6c8a0f3e01f62c845e8691f7e5be73d5b444145b0ebe5b (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 bash[77130]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service: Deactivated successfully.
2026-03-10T09:52:50.599 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 systemd[1]: Stopped Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:50.600 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 systemd[1]: Starting Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
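The prometheus.a bounce above is cephadm's reconfigure cycle: systemd stops the unit, podman removes the old container, then the unit is started again with the regenerated config. The unit follows the ceph-<fsid>@<daemon>.service naming visible in the log, so on vm03 it can be inspected directly:

    systemctl status 'ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service'
    journalctl -u 'ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service' -n 50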
2026-03-10T09:52:50.868 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 podman[77248]: 2026-03-10 09:52:50.759891883 +0000 UTC m=+0.026254494 container create 7915b82021d62ddc5c07219f73432876e4cb30b70916c2ed3556cdfff6460df3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:50.869 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 podman[77248]: 2026-03-10 09:52:50.810886051 +0000 UTC m=+0.077248672 container init 7915b82021d62ddc5c07219f73432876e4cb30b70916c2ed3556cdfff6460df3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:50.869 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 podman[77248]: 2026-03-10 09:52:50.821516914 +0000 UTC m=+0.087879525 container start 7915b82021d62ddc5c07219f73432876e4cb30b70916c2ed3556cdfff6460df3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:50.869 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 bash[77248]: 7915b82021d62ddc5c07219f73432876e4cb30b70916c2ed3556cdfff6460df3
2026-03-10T09:52:50.869 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 podman[77248]: 2026-03-10 09:52:50.747872491 +0000 UTC m=+0.014235102 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0
2026-03-10T09:52:50.869 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 systemd[1]: Started Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:51.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:50] ENGINE Bus STOPPING
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.868Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.868Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.868Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm03 (none))"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.868Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.868Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.880Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.880Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.883Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.883Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.885Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.885Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.413µs
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.885Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.914Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=2
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.916Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=2
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.916Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=2
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.916Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=269.605µs wal_replay_duration=30.858812ms wbl_replay_duration=130ns total_replay_duration=31.142112ms
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.917Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.917Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.917Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.937Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=20.187554ms db_storage=561ns remote_storage=1.152µs web_handler=320ns query_engine=532ns scrape=2.600037ms scrape_sd=72.515µs notify=7.274µs notify_sd=5.04µs rules=17.180656ms tracing=5.15µs
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.937Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-10T09:52:51.201 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:50.937Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:51.480 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: osdmap e88: 8 total, 8 up, 8 in
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[52384]: Standby manager daemon y started
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: osdmap e88: 8 total, 8 up, 8 in
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mon[56720]: Standby manager daemon y started
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:51] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:51] ENGINE Bus STOPPED
2026-03-10T09:52:51.481 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:51] ENGINE Bus STARTING
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 192.168.123.100:0/930671340' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: osdmap e88: 8 total, 8 up, 8 in
2026-03-10T09:52:51.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:51 vm03 ceph-mon[50536]: Standby manager daemon y started
2026-03-10T09:52:51.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:51] ENGINE Serving on http://:::9283
2026-03-10T09:52:51.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:51] ENGINE Bus STARTED
2026-03-10T09:52:52.202 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:51 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:51] ENGINE Bus STOPPING
2026-03-10T09:52:52.202 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:51 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:51] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:52:52.202 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:51 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:51] ENGINE Bus STOPPED
2026-03-10T09:52:52.202 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:52 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:52] ENGINE Bus STARTING
2026-03-10T09:52:52.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: Failing over to other MGR
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: mgrmap e32: x(active, starting, since 0.9739s), standbys: y
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: Manager daemon x is now available
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:52.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: Failing over to other MGR
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: mgrmap e32: x(active, starting, since 0.9739s), standbys: y
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: Manager daemon x is now available
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:52.206 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: ignoring --setuser ceph since I am not root
2026-03-10T09:52:52.207 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: ignoring --setgroup ceph since I am not root
2026-03-10T09:52:52.207 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:51 vm00 ceph-mgr[86542]: -- 192.168.123.100:0/3627819660 <== mon.0 v2:192.168.123.100:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55d5281c74a0 con 0x55d5281a4800
2026-03-10T09:52:52.207 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:52.054+0000 7f32bf53f140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:52:52.207 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:52.106+0000 7f32bf53f140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:52:52.540 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:52.540 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:52.540 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:52.540 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:52.540 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:52.540 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:52 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:52] ENGINE Serving on http://:::9283
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:52 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:52] ENGINE Bus STARTED
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.y)
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: Failing over to other MGR
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: mgrmap e32: x(active, starting, since 0.9739s), standbys: y
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:52:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24827 ' entity='mgr.y'
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: Manager daemon x is now available
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:52.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch
2026-03-10T09:52:52.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:52.539+0000 7f32bf53f140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:52:53.212 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:52.891+0000 7f32bf53f140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:52:53.212 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:52:53.213 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-10T09:52:53.213 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: from numpy import show_config as show_numpy_config
2026-03-10T09:52:53.213 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:52.982+0000 7f32bf53f140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:52:53.213 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:53.028+0000 7f32bf53f140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:52:53.213 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:53.119+0000 7f32bf53f140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:52:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-mon[52384]: mgrmap e33: x(active, since 2s), standbys: y
2026-03-10T09:52:53.508 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:53.508 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:53.508 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:53 vm00 ceph-mon[56720]: mgrmap e33: x(active, since 2s), standbys: y
2026-03-10T09:52:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:53 vm03 ceph-mon[50536]: mgrmap e33: x(active, since 2s), standbys: y
2026-03-10T09:52:53.795 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:53.506Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:52:53.795 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:53.506Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:52:53.795 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:53.509Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:52:53.795 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:52:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:52:53.509Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:52:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:53] ENGINE Bus STARTING
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:53] ENGINE Bus STARTING
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:54.104+0000 7f32bf53f140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:52:54.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:54.334+0000 7f32bf53f140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:53] ENGINE Bus STARTING
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:54.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:52:54.711 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:54.458+0000 7f32bf53f140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:52:54.712 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:54.569+0000 7f32bf53f140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:52:55.027 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:54.723+0000 7f32bf53f140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:52:55.027 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:54 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:54.775+0000 7f32bf53f140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:52:55.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:55.032+0000 7f32bf53f140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:52:55.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:55.112+0000 7f32bf53f140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:52:55.848 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:55.526+0000 7f32bf53f140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:54] ENGINE Serving on http://192.168.123.103:8765
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:54] ENGINE Serving on https://192.168.123.103:7151
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:54] ENGINE Bus STARTED
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: [10/Mar/2026:09:52:54] ENGINE Client ('192.168.123.103', 53314) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: mgrmap e34: x(active, since 4s), standbys: y
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:54] ENGINE Serving on http://192.168.123.103:8765
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:54] ENGINE Serving on https://192.168.123.103:7151
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:54] ENGINE Bus STARTED
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: [10/Mar/2026:09:52:54] ENGINE Client ('192.168.123.103', 53314) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: mgrmap e34: x(active, since 4s), standbys: y
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:55.848+0000 7f32bf53f140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:55.895+0000 7f32bf53f140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:55 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:55.940+0000 7f32bf53f140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:56.034+0000 7f32bf53f140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:52:56.121 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:56.076+0000 7f32bf53f140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.conf
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.conf
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:54] ENGINE Serving on http://192.168.123.103:8765
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:54] ENGINE Serving on https://192.168.123.103:7151
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:54] ENGINE Bus STARTED
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: [10/Mar/2026:09:52:54] ENGINE Client ('192.168.123.103', 53314) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: mgrmap e34: x(active, since 4s), standbys: y
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:56.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:56.449 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:56.166+0000 7f32bf53f140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:52:56.449 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:56.306+0000 7f32bf53f140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:52:56.707 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:56.648+0000 7f32bf53f140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:56.996 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: Standby manager daemon y restarted
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: Standby manager daemon y started
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: Standby manager daemon y restarted
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: Standby manager daemon y started
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:56.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:56 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: 2026-03-10T09:52:56.738+0000 7f32bf53f140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:56] ENGINE Bus STARTING
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: CherryPy Checker:
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: The Application mounted at '' has an empty config.
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]:
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:56] ENGINE Serving on http://:::9283
2026-03-10T09:52:56.998 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:52:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[86537]: [10/Mar/2026:09:52:56] ENGINE Bus STARTED
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:52:56.961+0000 7f3c182e4640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Creating ceph-iscsi config...
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Traceback (most recent call last):
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: return _run_code(code, main_globals, None,
2026-03-10T09:52:57.301 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: exec(code, run_globals)
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
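The traceback above bottoms out in cephadmlib/call_wrappers.py call_throws, the helper that turns any non-zero exit from a host command into a RuntimeError; here the failing command is the systemctl restart of the iscsi daemon's unit. A minimal sketch of that wrapper pattern, assuming plain subprocess semantics (this is an illustration, not the actual cephadm code):

    # Sketch of the call_throws pattern: run a host command, raise on failure.
    import subprocess

    def call_throws(cmd):
        proc = subprocess.run(cmd, capture_output=True, text=True)
        if proc.returncode != 0:
            # Mirrors the "RuntimeError: Failed command: ..." seen above.
            raise RuntimeError(
                "Failed command: %s: %s" % (" ".join(cmd), proc.stderr.strip()))
        return proc.stdout, proc.stderr, proc.returncode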
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Traceback (most recent call last):
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:52:57.302 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: Standby manager daemon y restarted
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: Standby manager daemon y started
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:52:57.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2627898544' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:56 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: return self.wait_async(
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: return future.result(timeout)
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: return self.__get_result()
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: raise self._exception
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: out, err, code = await self._run_cephadm(
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: raise OrchestratorError(
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Creating ceph-iscsi config...
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Traceback (most recent call last):
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: return _run_code(code, main_globals, None,
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: exec(code, run_globals)
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:57.303 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 systemd[1]: Stopping Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.868Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.868Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..."
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.868Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..."
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.868Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..."
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.869Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped"
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.869Z caller=main.go:1039 level=info msg="Stopping scrape manager..."
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.869Z caller=main.go:984 level=info msg="Scrape discovery manager stopped"
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.869Z caller=main.go:998 level=info msg="Notify discovery manager stopped"
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.870Z caller=main.go:1031 level=info msg="Scrape manager stopped"
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.870Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..."
2026-03-10T09:52:57.956 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.870Z caller=main.go:1261 level=info msg="Notifier manager stopped"
2026-03-10T09:52:57.957 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[77270]: ts=2026-03-10T09:52:57.871Z caller=main.go:1273 level=info msg="See you next time!"
2026-03-10T09:52:57.957 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 podman[78859]: 2026-03-10 09:52:57.882974737 +0000 UTC m=+0.034579799 container died 7915b82021d62ddc5c07219f73432876e4cb30b70916c2ed3556cdfff6460df3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:57.957 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 podman[78859]: 2026-03-10 09:52:57.903001842 +0000 UTC m=+0.054606913 container remove 7915b82021d62ddc5c07219f73432876e4cb30b70916c2ed3556cdfff6460df3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:57.957 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 bash[78859]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a
2026-03-10T09:52:57.957 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:57 vm03 ceph-mon[50536]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:57.957 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:57 vm03 ceph-mon[50536]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:57.957 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:57 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.957 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:57 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.957 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:57 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:57.957 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:57 vm03 ceph-mon[50536]: mgrmap e35: x(active, since 6s), standbys: y
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[52384]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[52384]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[52384]: mgrmap e35: x(active, since 6s), standbys: y
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[56720]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[56720]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:58.204 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:57 vm00 ceph-mon[56720]: mgrmap e35: x(active, since 6s), standbys: y
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service: Deactivated successfully.
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 systemd[1]: Stopped Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:57 vm03 systemd[1]: Starting Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 podman[78928]: 2026-03-10 09:52:58.087432471 +0000 UTC m=+0.024695454 container create 983b87a269f733bba7b2f3bcee20c09ac626f8ce733dd5778f4fbfb4589cb617 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 podman[78928]: 2026-03-10 09:52:58.114728633 +0000 UTC m=+0.051991637 container init 983b87a269f733bba7b2f3bcee20c09ac626f8ce733dd5778f4fbfb4589cb617 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 podman[78928]: 2026-03-10 09:52:58.117628032 +0000 UTC m=+0.054891025 container start 983b87a269f733bba7b2f3bcee20c09ac626f8ce733dd5778f4fbfb4589cb617 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 bash[78928]: 983b87a269f733bba7b2f3bcee20c09ac626f8ce733dd5778f4fbfb4589cb617
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 podman[78928]: 2026-03-10 09:52:58.075854987 +0000 UTC m=+0.013117990 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 systemd[1]: Started Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.154Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.154Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.154Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm03 (none))"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.154Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.154Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.157Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.158Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.160Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.160Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.683µs
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.160Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.162Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.162Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.192Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=3
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.198Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=3
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.205Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=3
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.205Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=3
2026-03-10T09:52:58.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.205Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=20.899µs wal_replay_duration=44.921879ms wbl_replay_duration=390ns total_replay_duration=45.041364ms
2026-03-10T09:52:58.234 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.209Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-10T09:52:58.234 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.209Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-10T09:52:58.234 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.209Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-10T09:52:58.234 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:58] ENGINE Bus STOPPING
2026-03-10T09:52:58.547 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.232Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=22.85088ms db_storage=1.062µs remote_storage=1.703µs web_handler=591ns query_engine=1.203µs scrape=729.225µs scrape_sd=125.325µs notify=10.93µs notify_sd=14.357µs rules=21.588236ms tracing=6.783µs
2026-03-10T09:52:58.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.233Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-10T09:52:58.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:52:58.233Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-10T09:52:58.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:58] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:52:58.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:58] ENGINE Bus STOPPED
2026-03-10T09:52:58.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:58] ENGINE Bus STARTING
2026-03-10T09:52:58.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:58] ENGINE Serving on http://:::9283
2026-03-10T09:52:58.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:52:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:52:58] ENGINE Bus STARTED
2026-03-10T09:52:59.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals)
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.wait_async(
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: return future.result(timeout)
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.__get_result()
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: raise self._exception
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: out, err, code = await self._run_cephadm(
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: raise OrchestratorError(
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals)
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:59.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals)
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.wait_async(
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: return future.result(timeout)
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.__get_result()
2026-03-10T09:52:59.121 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: raise self._exception
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: out, err, code = await self._run_cephadm(
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: raise OrchestratorError(
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals)
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:59.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:59.212 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Creating ceph-iscsi config...
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: exec(code, run_globals)
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: return self.wait_async(
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: return future.result(timeout)
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: return self.__get_result()
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: raise self._exception
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: out, err, code = await self._run_cephadm(
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: raise OrchestratorError(
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Creating ceph-iscsi config...
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: Traceback (most recent call last):
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: exec(code, run_globals)
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:52:59.213 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:52:59.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:53:00.236 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: Upgrade: Setting container_image for all mgr
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: Deploying daemon agent.vm00 on vm00
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:00.237 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:52:59 vm03 ceph-mon[50536]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:53:00.244 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: Upgrade: Setting container_image for all mgr
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: Deploying daemon agent.vm00 on vm00
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:00.245 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[56720]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: Upgrade: Setting container_image for all mgr
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: Deploying daemon agent.vm00 on vm00
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:00.247 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:52:59 vm00 ceph-mon[52384]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:53:01.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:53:01.222 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:01 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:53:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:01 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:01.944 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: Upgrade: Updating mgr.y 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: Deploying daemon mgr.y on vm00 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/2738904405' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[56720]: from='client.24905 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: Upgrade: Updating mgr.y 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: Deploying daemon mgr.y on vm00 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/2738904405' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T09:53:02.242 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:02 vm00 ceph-mon[52384]: from='client.24905 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: Upgrade: Updating mgr.y 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: Deploying daemon mgr.y on vm00 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/2738904405' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:53:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:02 vm03 ceph-mon[50536]: from='client.24905 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 starting - - - -
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 11s ago 6m - -
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (42s) 3s ago 4m 17.6M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (26s) 11s ago 4m 41.7M - dad864ee21e9 011f2081bf92
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 starting - - - -
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283 running (52s) 11s ago 6m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 2d74d7e9d583
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (24s) 3s ago 6m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 76b6e675ba89
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (6m) 3s ago 6m 57.9M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (6m) 11s ago 6m 47.4M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (6m) 3s ago 6m 48.6M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (38s) 3s ago 4m 8916k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (33s) 11s ago 4m 8950k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (5m) 3s ago 5m 55.5M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (5m) 3s ago 5m 53.0M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (5m) 3s ago 5m 49.0M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (5m) 3s ago 5m 50.0M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (5m) 11s ago 5m 53.2M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (5m) 11s ago 5m 52.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (5m) 11s ago 5m 50.3M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (5m) 11s ago 5m 50.4M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 starting - - - -
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (4m) 3s ago 4m 92.5M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (4m) 11s ago 4m 93.9M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (4m) 3s ago 4m 93.1M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:53:02.704 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (4m) 11s ago 4m 90.7M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
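Reading the "ceph orch ps" snapshot: both mgr daemons already report the 19.2.3-678-ge911bdeb target build while mons, OSDs and RGWs are still on 17.2.0, and iscsi.foo.vm00.dqkdwh plus agent.vm00 show STATUS "starting" with no REFRESHED/AGE yet; that still-starting iSCSI daemon is what trips the CEPHADM_FAILED_DAEMON warning a moment later. Two standard flags for re-checking such a state by hand:

    ceph orch ps --refresh              # force cephadm to refresh its cached daemon inventory
    ceph orch ps --daemon-type iscsi    # narrow the listing to one daemon type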
2026-03-10T09:53:03.009 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:02 vm00 systemd[1]: Stopping Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    "mon": {
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    "mgr": {
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    "osd": {
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    "mds": {},
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    "rgw": {
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    "overall": {
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15,
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:    }
2026-03-10T09:53:03.039 INFO:teuthology.orchestra.run.vm00.stdout:}
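The "ceph versions" dump is the expected mid-flight picture for cephadm's upgrade ordering (mgrs first): 2 daemons on the 19.2.3 squid build against 15 still on 17.2.0 quincy. Note the orchestrator reports "2/23 daemons upgraded" below while only 17 daemons appear here, since "ceph versions" omits daemons that do not report a Ceph version (the monitoring stack and the iSCSI gateway, for instance). A minimal completion check over the same JSON, assuming jq is available:

    # block until a single version remains cluster-wide
    while [ "$(ceph versions | jq '.overall | length')" -gt 1 ]; do
        sleep 30
    done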
2026-03-10T09:53:03.268 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93298]: 2026-03-10 09:53:03.08589442 +0000 UTC m=+0.110221015 container stop 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
2026-03-10T09:53:03.268 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93298]: 2026-03-10 09:53:03.090138454 +0000 UTC m=+0.114465060 container died 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T09:53:03.268 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93298]: 2026-03-10 09:53:03.193973295 +0000 UTC m=+0.218299881 container remove 76b6e675ba89e5e592b174e61df1039fb4dcc8c6f731e8215cb99e620e03edbb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:53:03.268 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 bash[93298]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y
2026-03-10T09:53:03.268 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Main process exited, code=exited, status=143/n/a
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "in_progress": true,
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "services_complete": [
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:        "mgr"
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    ],
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "progress": "2/23 daemons upgraded",
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "message": "Currently upgrading mgr daemons",
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:    "is_paused": false
2026-03-10T09:53:03.338 INFO:teuthology.orchestra.run.vm00.stdout:}
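The podman stop/died/remove sequence for container 76b6e675ba89 followed by systemd's status=143 (SIGTERM) exit is the orchestrator recycling the mgr.y container, and "orch upgrade status" confirms the run is mid-flight with only the mgr service complete. The status call sits alongside the usual control verbs, all standard "ceph orch upgrade" subcommands:

    ceph orch upgrade status    # the JSON printed above
    ceph orch upgrade pause     # hold the rollout without aborting it
    ceph orch upgrade resume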
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3981673003' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[52384]: from='client.24917 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[52384]: from='client.24923 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2190959639' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3981673003' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[56720]: from='client.24917 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:03.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[56720]: from='client.24923 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:03.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:03 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2190959639' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
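The "osd blocklist ... rm" dispatches come from the reconfigured iSCSI gateway clearing its own stale client addresses, which is precisely what the 'allow command "osd blocklist"' cap granted earlier permits. The same cleanup by hand, with the address copied from the log:

    ceph osd blocklist ls
    ceph osd blocklist rm 192.168.123.100:0/182320712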
2026-03-10T09:53:03.522 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:03.512Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:03.522 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:03.516Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:03.522 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Failed with result 'exit-code'.
2026-03-10T09:53:03.522 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 systemd[1]: Stopped Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:53:03.522 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Consumed 13.891s CPU time.
2026-03-10T09:53:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:03 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3981673003' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]: dispatch
2026-03-10T09:53:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:03 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]: dispatch
2026-03-10T09:53:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:03 vm03 ceph-mon[50536]: from='client.24917 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:03 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:03 vm03 ceph-mon[50536]: from='client.24923 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:03 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2190959639' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:03.755 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T09:53:03.755 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T09:53:03.755 INFO:teuthology.orchestra.run.vm00.stdout:    daemon iscsi.foo.vm00.dqkdwh on vm00 is in unknown state
2026-03-10T09:53:03.843 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 systemd[1]: Starting Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
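The Alertmanager webhook errors are a name-resolution failure, not a Ceph one: the dashboard receiver URL points at host.containers.internal, which the DNS server at 192.168.123.1 cannot resolve, so every notification attempt dies in the lookup. The HEALTH_WARN above it is the still-starting iSCSI daemon and clears on its own shortly after ("Cluster is now healthy" further down). A quick check against the same resolver, assuming dig is installed on the host:

    dig +short host.containers.internal @192.168.123.1   # an empty answer reproduces the lookup failure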
2026-03-10T09:53:03.843 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:03.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:03.843 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:03.521Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93534]: 2026-03-10 09:53:03.87939027 +0000 UTC m=+0.027079939 container create 6288efd98c12dfd023905f340f11da3a922346adf0a6a4bc85348c76539b3c05 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, ceph=True, version=8, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, build-date=2022-05-03T08:36:31.336870, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, release=754, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, RELEASE=HEAD, vcs-type=git, GIT_CLEAN=True, GIT_BRANCH=HEAD, distribution-scope=public) 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93534]: 2026-03-10 09:53:03.915003752 +0000 UTC m=+0.062693442 container init 6288efd98c12dfd023905f340f11da3a922346adf0a6a4bc85348c76539b3c05 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, GIT_BRANCH=HEAD, distribution-scope=public, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, release=754, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, RELEASE=HEAD) 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93534]: 2026-03-10 09:53:03.919290606 +0000 UTC m=+0.066980286 container start 6288efd98c12dfd023905f340f11da3a922346adf0a6a4bc85348c76539b3c05 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , distribution-scope=public, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, release=754, io.buildah.version=1.19.8, io.openshift.expose-services=, RELEASE=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, ceph=True, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, vcs-type=git, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, name=centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 bash[93534]: 6288efd98c12dfd023905f340f11da3a922346adf0a6a4bc85348c76539b3c05 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 podman[93534]: 2026-03-10 09:53:03.869038322 +0000 UTC m=+0.016728012 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:03 vm00 systemd[1]: Started Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:53:04.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:04.085+0000 7efdb04bd000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]': finished 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: osdmap e89: 8 total, 8 up, 8 in 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='client.24926 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4107612727' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T09:53:04.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1929527963' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:04 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]': finished 2026-03-10T09:53:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: osdmap e89: 8 total, 8 up, 8 in 2026-03-10T09:53:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='client.24926 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:53:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4107612727' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:53:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1929527963' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/182320712"}]': finished 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: osdmap e89: 8 total, 8 up, 8 in 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='client.24926 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4107612727' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1929527963' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:04 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:04.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:04.173+0000 7efdb04bd000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:53:04.970 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:04.690+0000 7efdb04bd000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 
vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.518 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Cluster is now healthy
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='client.?
' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]': finished
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: osdmap e90: 8 total, 8 up, 8 in
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.519 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Cluster is now healthy
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]': finished
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: osdmap e90: 8 total, 8 up, 8 in
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:05 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.520 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:05 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:05.276+0000 7efdb04bd000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.540 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Cluster is now healthy
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2764990511"}]': finished
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: osdmap e90: 8 total, 8 up, 8 in
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:05.541 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:05 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:05.861 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:05 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:05.580+0000 7efdb04bd000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:53:05.861 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:05 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:05.711+0000 7efdb04bd000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='client.?
192.168.123.100:0/1213724466' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 340 B/s rd, 0 op/s
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.445 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]': finished
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[52384]: osdmap e91: 8 total, 8 up, 8 in
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1213724466' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 340 B/s rd, 0 op/s
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]': finished
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:06 vm00 ceph-mon[56720]: osdmap e91: 8 total, 8 up, 8 in
2026-03-10T09:53:06.446 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:06.153+0000 7efdb04bd000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1213724466' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='client.?
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 340 B/s rd, 0 op/s
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.507 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: from='client.?
' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2180304851"}]': finished
2026-03-10T09:53:06.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:06 vm03 ceph-mon[50536]: osdmap e91: 8 total, 8 up, 8 in
2026-03-10T09:53:07.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:07.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/116010921' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3150516231"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='client.?
192.168.123.100:0/116010921' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3150516231"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:07 vm00 ceph-mon[56720]: from='mgr.24833 '
entity='mgr.x'
2026-03-10T09:53:07.622 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:07.401+0000 7efdb04bd000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='client.?
192.168.123.100:0/116010921' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3150516231"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:07.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:07.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:07 vm03 ceph-mon[50536]: from='mgr.24833 '
entity='mgr.x' 2026-03-10T09:53:08.041 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:07.876+0000 7efdb04bd000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:53:08.041 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:08.039+0000 7efdb04bd000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:53:08.309 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:08.205+0000 7efdb04bd000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/116010921' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3150516231"}]': finished 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: osdmap e92: 8 total, 8 up, 8 in 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
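[editor's note: the repeating cmd=[{"prefix": ...}] entries above are monitor audit records of commands the active mgr (mgr.x) dispatches on each polling cycle, plus the iscsi gateway client clearing a stale OSD blocklist entry. A sketch of the equivalent CLI invocations, for reference (the address is the one seen in the entries above; the JSON "prefix" field is what the CLI front-end sends):

    # mgr.x polling cycle as recorded in the audit entries
    ceph config dump --format json
    ceph versions
    ceph config generate-minimal-conf
    ceph auth get client.admin
    # iscsi gateway client removing its stale blocklist entry
    ceph osd blocklist rm 192.168.123.100:6801/3150516231
]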
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/2430159309' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]: dispatch
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]: dispatch
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/116010921' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3150516231"}]': finished
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: osdmap e92: 8 total, 8 up, 8 in
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/2430159309' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:08 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:08.309+0000 7efdb04bd000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:53:08.621 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:08.418+0000 7efdb04bd000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/116010921' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/3150516231"}]': finished
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: osdmap e92: 8 total, 8 up, 8 in
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.740 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/2430159309' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]: dispatch
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]: dispatch
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:08.741 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:08 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.015 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:09.012+0000 7efdb04bd000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:53:09.049 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:53:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:53:08] "GET /metrics HTTP/1.1" 200 37773 "" "Prometheus/2.51.0"
2026-03-10T09:53:09.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:09.155+0000 7efdb04bd000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]': finished
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: osdmap e93: 8 total, 8 up, 8 in
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:09 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]': finished
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: osdmap e93: 8 total, 8 up, 8 in
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.813 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4266218054"}]': finished
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: osdmap e93: 8 total, 8 up, 8 in
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:09.815 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:09 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
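[editor's note: each finished "osd blocklist rm" bumps the osdmap epoch (e92 to e93 in the entries above) while the pgmap stays at 161 active+clean, so the cluster remains healthy through the cleanup. A sketch of how the same state could be checked interactively:

    ceph osd stat             # expect: 8 osds: 8 up, 8 in
    ceph pg stat              # expect: 161 pgs: 161 active+clean
    ceph osd blocklist ls     # remaining blocklist entries, if any
]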
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3278247048' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3150516231"}]: dispatch
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.728 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3278247048' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3150516231"}]: dispatch
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:10 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.729 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:10.604+0000 7efdb04bd000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3278247048' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3150516231"}]: dispatch
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:10 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:11.061 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:10.766+0000 7efdb04bd000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:53:11.062 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:10.886+0000 7efdb04bd000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:53:11.357 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:11.061+0000 7efdb04bd000 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:53:11.357 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:11.149+0000 7efdb04bd000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:53:11.357 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:11.259+0000 7efdb04bd000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:53:11.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:11.362+0000 7efdb04bd000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[52384]: Upgrade: Updating mgr.y
2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[52384]: Deploying daemon mgr.y on vm00
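[editor's note: before redeploying mgr.y on the new image, mgr.x regenerates the daemon's keyring with the "auth get-or-create" command audited above; "Upgrade: Updating mgr.y" marks the start of that daemon's turn in the upgrade. The equivalent CLI for the audited command, shown as a sketch with the exact caps from the log:

    ceph auth get-or-create mgr.y \
        mon 'profile mgr' osd 'allow *' mds 'allow *'
]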
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3278247048' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3150516231"}]': finished 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[52384]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:11.780+0000 7efdb04bd000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 systemd[1]: Stopping Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: 2026-03-10T09:53:11.851+0000 7efdb04bd000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[93545]: [10/Mar/2026:09:53:11] ENGINE Bus STARTING 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 podman[95699]: 2026-03-10 09:53:11.916863409 +0000 UTC m=+0.054556232 container died 6288efd98c12dfd023905f340f11da3a922346adf0a6a4bc85348c76539b3c05 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, GIT_CLEAN=True, GIT_BRANCH=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, RELEASE=HEAD, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, maintainer=Guillaume Abrioux , release=754, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, io.buildah.version=1.19.8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, version=8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, vendor=Red Hat, Inc., io.openshift.expose-services=, io.openshift.tags=base centos centos-stream) 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[56720]: Upgrade: Updating mgr.y 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[56720]: Deploying daemon mgr.y on vm00 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3278247048' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3150516231"}]': finished 2026-03-10T09:53:11.917 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:11 vm00 ceph-mon[56720]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T09:53:12.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:11 vm03 ceph-mon[50536]: Upgrade: Updating mgr.y 2026-03-10T09:53:12.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:11 vm03 ceph-mon[50536]: Deploying daemon mgr.y on vm00 2026-03-10T09:53:12.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:11 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:12.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:11 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3278247048' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/3150516231"}]': finished 2026-03-10T09:53:12.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:11 vm03 ceph-mon[50536]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T09:53:12.181 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 podman[95699]: 2026-03-10 09:53:11.942278291 +0000 UTC m=+0.079971114 container remove 6288efd98c12dfd023905f340f11da3a922346adf0a6a4bc85348c76539b3c05 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., RELEASE=HEAD, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, architecture=x86_64, io.openshift.expose-services=, release=754, vcs-type=git, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, ceph=True, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream) 2026-03-10T09:53:12.181 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 bash[95699]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y 2026-03-10T09:53:12.181 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:11 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Main process exited, code=exited, status=143/n/a 2026-03-10T09:53:12.181 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Failed with result 'exit-code'. 2026-03-10T09:53:12.181 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 systemd[1]: Stopped Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:53:12.181 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Consumed 5.389s CPU time. 2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 systemd[1]: Starting Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
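Note on the systemd records above: status=143/n/a is 128 + 15, i.e. the old 17.2.0 mgr.y container exited on SIGTERM when cephadm stopped it for redeployment, so the "Failed with result 'exit-code'" line is expected during an orderly upgrade rather than a crash. A minimal decoding sketch (Python; the helper name is invented for illustration):

    import signal

    def decode_exit_status(status: int) -> str:
        # Shell convention: statuses above 128 mean 128 + signal number.
        if status > 128:
            return f"terminated by {signal.Signals(status - 128).name}"
        return f"exited with code {status}"

    print(decode_exit_status(143))  # terminated by SIGTERM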
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 podman[95820]: 2026-03-10 09:53:12.273473142 +0000 UTC m=+0.016600746 container create a31a53f9eed3f2c67abf307bbad9373cbbfcf6d68b2c455713adb4194ad66f4f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 podman[95820]: 2026-03-10 09:53:12.3097362 +0000 UTC m=+0.052863813 container init a31a53f9eed3f2c67abf307bbad9373cbbfcf6d68b2c455713adb4194ad66f4f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0)
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 podman[95820]: 2026-03-10 09:53:12.312790277 +0000 UTC m=+0.055917881 container start a31a53f9eed3f2c67abf307bbad9373cbbfcf6d68b2c455713adb4194ad66f4f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223)
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 bash[95820]: a31a53f9eed3f2c67abf307bbad9373cbbfcf6d68b2c455713adb4194ad66f4f
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 podman[95820]: 2026-03-10 09:53:12.266513675 +0000 UTC m=+0.009641288 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 systemd[1]: Started Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:12.415+0000 7f8b01a41140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:53:12.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:12.461+0000 7f8b01a41140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 928 B/s rd, 0 op/s
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: Standby manager daemon y restarted
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: Standby manager daemon y started
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 928 B/s rd, 0 op/s
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: Standby manager daemon y restarted
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: Standby manager daemon y started
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished
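The audit entries above repeat because every monitor in quorum (mon.a, mon.b, mon.c) writes its own copy of the cluster audit trail, and each mutating command appears first as "dispatch" and later as "': finished", as with the config rm container_image pair for mgr.y. A rough pairing sketch for sifting such logs (Python; the regex and helper are illustrative assumptions, not part of Ceph):

    import re

    AUDIT = re.compile(r"cmd='?(\[.*?\])'?: (dispatch|finished)")

    def audit_phases(lines):
        # Map each command payload to the set of phases seen for it.
        phases = {}
        for line in lines:
            m = AUDIT.search(line)
            if m:
                cmd, phase = m.groups()
                phases.setdefault(cmd, set()).add(phase)
        return phases

    sample = [
        'cmd=[{"prefix": "config rm", "who": "mgr.y"}]: dispatch',
        'cmd=\'[{"prefix": "config rm", "who": "mgr.y"}]\': finished',
    ]
    print(audit_phases(sample))  # payload -> {'dispatch', 'finished'}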
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:12 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 928 B/s rd, 0 op/s
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: Standby manager daemon y restarted
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: Standby manager daemon y started
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:12 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.249 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:12.940+0000 7f8b01a41140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:53:13.518 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:13.448+0000 7f8b01a41140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:53:13.518 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:13.517Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:13.518 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:13.517Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:13.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: mgrmap e36: x(active, since 21s), standbys: y
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
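The Alertmanager failures above are environmental rather than upgrade-related: the dashboard's webhook receiver is registered under host.containers.internal, a name podman normally makes resolvable inside containers, but resolution here falls through to the host resolver at 192.168.123.1:53, which does not know it, so every notification attempt fails. A quick resolvability probe (Python; illustrative only):

    import socket
    from urllib.parse import urlparse

    def receiver_resolves(url: str) -> bool:
        # True if the receiver URL's hostname resolves in this environment.
        host = urlparse(url).hostname
        try:
            socket.getaddrinfo(host, None)
            return True
        except socket.gaierror:
            return False

    print(receiver_resolves("https://host.containers.internal:8443/api/prometheus_receiver"))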
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:13 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: mgrmap e36: x(active, since 21s), standbys: y
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: mgrmap e36: x(active, since 21s), standbys: y
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
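The alternating "Metadata not up to date on all hosts" and "Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb" records show the upgrade loop polling the cluster's version map until the restarted mgr.y re-registers with the target build; until then one mgr still counts against 17.2.0 and non-agent service specs stay deferred. A sketch of that comparison (Python; the payload shape loosely mimics `ceph versions` JSON and the banners are abbreviated, not copied from a real cluster):

    def daemons_behind(versions: dict, target: str) -> dict:
        # Count daemons per type whose version banner lacks the target build.
        behind = {}
        for daemon_type, banners in versions.items():
            stale = sum(n for banner, n in banners.items() if target not in banner)
            if stale:
                behind[daemon_type] = stale
        return behind

    sample = {
        "mgr": {"ceph version 17.2.0 quincy (stable)": 1,
                "ceph version 19.2.3-678-ge911bdeb squid (stable)": 1},
        "mon": {"ceph version 17.2.0 quincy (stable)": 3},
    }
    print(daemons_behind(sample, "19.2.3-678-ge911bdeb"))  # {'mgr': 1, 'mon': 3}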
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:13.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:13 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:13.860 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:53:13.860 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-10T09:53:13.860 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: from numpy import show_config as show_numpy_config
2026-03-10T09:53:13.861 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:13.661+0000 7f8b01a41140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:53:13.861 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:13.730+0000 7f8b01a41140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:53:13.861 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:13.855+0000 7f8b01a41140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:53:13.861 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:13.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:13.861 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:13.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:14.397 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:14.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:14.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:14.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
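The CephOSDFlapping evaluation failure above is another transient of the upgrade window: Prometheus briefly holds ceph_osd_metadata for the same ceph_daemon from two scrape sources (instance="ceph_cluster" and instance="192.168.123.103:9283"), so the "on (ceph_daemon) group_left" join finds duplicate series on its right-hand side and rejects the many-to-many match. A toy reproduction of that duplicate check (Python; label sets abbreviated from the log record above):

    from collections import defaultdict

    def duplicate_match_groups(series, match_labels):
        # Group series by the join labels, as PromQL's on(...) matching does;
        # any group holding more than one series is what triggers the
        # "many-to-many matching not allowed" error.
        groups = defaultdict(list)
        for labels in series:
            key = tuple(labels.get(l) for l in match_labels)
            groups[key].append(labels)
        return {k: v for k, v in groups.items() if len(v) > 1}

    rhs = [
        {"__name__": "ceph_osd_metadata", "ceph_daemon": "osd.0", "instance": "ceph_cluster"},
        {"__name__": "ceph_osd_metadata", "ceph_daemon": "osd.0", "instance": "192.168.123.103:9283"},
    ]
    print(duplicate_match_groups(rhs, ["ceph_daemon"]))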
Skipping non agent specs 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.800 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:14 vm00 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:14.698+0000 7f8b01a41140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: 
from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:14.805 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:14 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:14 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.093 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:14 vm00 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:14.949+0000 7f8b01a41140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:53:15.093 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:15.003+0000 7f8b01a41140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:53:15.374 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:15.092+0000 7f8b01a41140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:53:15.374 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:15.164+0000 7f8b01a41140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:53:15.374 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:15.288+0000 7f8b01a41140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.752 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.753 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:15 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:15.586+0000 7f8b01a41140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T09:53:15.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:15.699+0000 7f8b01a41140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:15.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:15 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:16.091+0000 7f8b01a41140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.950 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: 
from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:16 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.971 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.971 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.971 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.971 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[52384]: 
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:16.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:16 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:16.973 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:16.702+0000 7f8b01a41140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:53:16.973 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:16.773+0000 7f8b01a41140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:53:16.973 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:16.867+0000 7f8b01a41140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:53:17.212 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed"
  rule="alert: CephNodeDiskspaceWarning
    expr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5) * on (instance) group_left (nodename) node_uname_info < 0
    labels:
      oid: 1.3.6.1.4.1.50495.1.2.1.8.4
      severity: warning
      type: ceph_default
    annotations:
      description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will be full in less than 5 days based on the 48 hour trailing fill rate.
      summary: Host filesystem free space is getting low"
  err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation:
    [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"},
     {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];
    many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:17.275 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:17.022+0000 7f8b01a41140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:53:17.275 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:17.085+0000 7f8b01a41140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:53:17.275 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:17.193+0000 7f8b01a41140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:53:17.527 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:17.399+0000 7f8b01a41140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:53:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
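The CephNodeDiskspaceWarning failure above is a PromQL join problem rather than a disk problem: Prometheus holds two node_uname_info series for instance="vm03" (one carrying a cluster label, one without), so the on (instance) group_left (nodename) match sees duplicates on its right-hand side. A hedged sketch of a deduplicated form of the expression, run against the Prometheus HTTP API (the host, and port 9095 as cephadm's usual Prometheus port, are assumptions; adjust to the deployment):

    # Collapse node_uname_info to one series per (instance, nodename) before joining.
    curl -sG http://vm03:9095/api/v1/query --data-urlencode \
      'query=predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
               * on (instance) group_left (nodename)
               max by (instance, nodename) (node_uname_info) < 0'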
2026-03-10T09:53:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: Standby manager daemon y restarted
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:17.597+0000 7f8b01a41140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:53:17.637+0000 7f8b01a41140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:53:17] ENGINE Bus STARTING
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: CherryPy Checker:
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: The Application mounted at '' has an empty config.
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]:
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:53:17] ENGINE Serving on http://:::9283
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:53:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:53:17] ENGINE Bus STARTED
2026-03-10T09:53:17.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
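The ENGINE lines show CherryPy inside the restarted standby mgr bringing its web endpoints up, with the prometheus exporter module serving on 9283 (that module's default port); the NOTIFY_TYPES messages are load-time warnings from mgr modules that predate that attribute, not failures. A quick probe, assuming vm00 resolves from wherever it is run:

    curl -s http://vm00:9283/metrics | head -n 5   # exporter answers once the mgr is up
    ceph mgr module ls                             # which mgr modules are enabled/loaded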
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: Standby manager daemon y restarted
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: Standby manager daemon y started
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:53:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:17 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:18.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:18.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: Standby manager daemon y restarted
2026-03-10T09:53:18.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: Standby manager daemon y started
2026-03-10T09:53:18.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch
2026-03-10T09:53:18.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:53:18.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch
2026-03-10T09:53:18.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:17 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:53:18.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:18.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:18.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 591 B/s rd, 0 op/s
2026-03-10T09:53:18.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:18.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:18.743 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:18.743 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:18.743 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
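The pgmap lines ("161 pgs: 161 active+clean") show that placement groups stayed fully available while the mgr was redeployed. The equivalent point-in-time checks from a shell:

    ceph pg stat   # one-line pg summary, matching the pgmap messages above
    ceph -s        # full cluster status: health, mon/mgr maps, pg states, client io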
2026-03-10T09:53:18.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:18.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:18.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 591 B/s rd, 0 op/s
2026-03-10T09:53:18.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:18.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:18.745 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:18.745 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:18.745 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:18.745 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[56720]: mgrmap e37: x(active, since 27s), standbys: y
2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 591 B/s rd, 0 op/s 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:53:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: mgrmap e37: x(active, since 27s), standbys: y 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:18 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:19.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: mgrmap e37: x(active, since 27s), standbys: y 2026-03-10T09:53:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:18 vm00 ceph-mon[52384]: from='mgr.24833 
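The records above show where the run is sitting: the cephadm upgrade is still waiting on the active mgr, which reports 17.2.0 against the target build 19.2.3-678-ge911bdeb, so every serve-loop pass re-logs the same version gap. cephadm upgrades the mgr daemons before any other component, so this is the expected first phase rather than a failure. Assuming shell access on an admin host (for example via cephadm shell), the same state could be confirmed with something like:

  ceph orch upgrade status   # target image, in_progress flag, services_complete, and any progress message
  ceph versions              # per-daemon version histogram; the mgr count should be the first to move off 17.2.0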
2026-03-10T09:53:19.286 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:53:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:53:18] "GET /metrics HTTP/1.1" 200 37773 "" "Prometheus/2.51.0"
2026-03-10T09:53:19.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:19 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:53:19.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
[... the same Upgrade/Metadata records and mgr.x dispatch cycle, echoed repeatedly at 09:53:19 by ceph-mon.b (vm03), ceph-mon.a (vm00) and ceph-mon.c (vm00), omitted ...]
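The repeated "Metadata not up to date on all hosts. Skipping non agent specs" line comes from the cephadm serve loop: with the agent enabled, cephadm defers applying non-agent service specs until every host's agent has checked in with fresh metadata, and it logs this once per pass until that happens. If this state persisted, the agent and host view could be inspected from an admin shell with, for example:

  ceph orch ps --daemon_type agent   # agent daemons, their status and REFRESHED age
  ceph orch host ls                  # hosts known to cephadm and their status
  ceph health detail                 # an agent that stops reporting surfaces as CEPHADM_AGENT_DOWN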
2026-03-10T09:53:21.005 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:21.006 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:20 vm03 ceph-mon[50536]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1022 B/s rd, 0 op/s
[... the same Metadata records and mgr.x dispatch cycle (config dump, versions, config generate-minimal-conf, auth get), echoed repeatedly at 09:53:20 by ceph-mon.b (vm03), ceph-mon.a (vm00) and ceph-mon.c (vm00), omitted ...]
Skipping non agent specs 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:20 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 
192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-10T09:53:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:21 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:53:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:21 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 915 B/s rd, 0 op/s
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:23.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:23.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:23.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:23.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 915 B/s rd, 0 op/s
2026-03-10T09:53:23.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:23.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:23.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:23.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:23.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:23.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:22 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 915 B/s rd, 0 op/s 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.261 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 
vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:23.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:22 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:23.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:23.518Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:23.770 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:23.518Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled 
2026-03-10T09:53:23.770 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:23.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:24.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:24.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:24.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:24.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:24.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:24.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:24.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:24.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:24.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:24.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:24.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:24.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:23 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:24.155 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:23 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:24.155 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:23 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:24.155 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:23 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:24.155 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:23 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:24.155 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:23 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:24.155 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:23 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:24.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:24.149Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:25.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:25.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:24 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:25.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:24 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:26.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:26.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:26.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:25 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:26.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:26.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:25 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:26.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:25 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:26.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:25 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:26.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:25 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:26.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:25 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' 
entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.889 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.890 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:26.890 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:26 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:27.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:26 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:27.298 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:26.948Z caller=group.go:483 
level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.059 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:27 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 
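Note: two recurring signals in this stretch of the run are worth decoding. The "Metadata not up to date on all hosts. Skipping non agent specs" line comes from the cephadm serve loop: when the cephadm agent is in use, the orchestrator holds off on applying non-agent service specs until every host's agent has reported fresh metadata, and the four-command audit group that repeats alongside it (config dump, versions, config generate-minimal-conf, auth get client.admin) reflects the active mgr re-reading cluster state on each pass. The CephNodeDiskspaceWarning failure above is a PromQL join error: Prometheus holds two node_uname_info series for instance="vm03", one carrying a cluster label and one without, so the rule's one-to-one on (instance) match finds duplicate series on its right-hand side. A minimal shell sketch for checking both conditions from a node follows; the systemd unit name and the Prometheus port (9095) are assumptions based on cephadm defaults, and the fsid is copied from the log above:

    # Tail the same mon audit stream captured here (assumed cephadm unit naming).
    journalctl -f -u 'ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service'

    # Count instances with duplicate node_uname_info series, the condition
    # that breaks the alert rule's one-to-one join.
    curl -sG 'http://vm03:9095/api/v1/query' \
        --data-urlencode 'query=count by (instance) (node_uname_info) > 1'

One plausible repair, a sketch rather than the shipped rule, is to collapse the duplicates before joining:

    predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
      * on (instance) group_left (nodename)
        max by (instance, nodename) (node_uname_info) < 0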
2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:28.371 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:28.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:27 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:53:28 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:53:28] "GET /metrics HTTP/1.1" 200 37773 "" "Prometheus/2.51.0"
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.182 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.183 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:28 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:29.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:29.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:29.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:29.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:29.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:28 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.258 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.258 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.258 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.259 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:29 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:29 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.107 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.108 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:30 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: 
from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:30 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 
2026-03-10T09:53:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:31 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:31 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:31 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:31 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:31 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:32.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:32.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:32.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:32.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:32.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:32.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:32.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:32.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:32.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:32.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:32.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:32.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:31 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:33.251 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:33.252 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:33.252 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:33.252 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:33.252 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:33.252 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:33.252 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:33.253 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:32 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:33.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:33.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:33.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:33.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:33.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:33.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:33.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:33.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:33.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:33.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:33.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:33.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:33.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:33.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:33.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:32 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:33.519Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:33.845 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:33.519Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:33.845 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:33.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:33.845 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:33.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:53:34.045 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:33 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' 
entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:34.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:33 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:34.482 
INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 17s ago 6m - - 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 42s ago 6m - - 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (73s) 17s ago 5m 18.6M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (58s) 42s ago 4m 41.7M - dad864ee21e9 011f2081bf92 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (33s) 17s ago 4m 43.8M - 3.5 e1d6a67b021e 798c3470c0d4 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283 running (84s) 42s ago 6m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 2d74d7e9d583 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (22s) 17s ago 7m 394M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T09:53:34.501 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (7m) 17s ago 7m 59.4M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (6m) 42s ago 6m 47.4M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 
running (6m) 17s ago 6m 51.8M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (70s) 17s ago 5m 9084k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (65s) 42s ago 5m 8950k - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (6m) 17s ago 6m 56.0M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (6m) 17s ago 6m 53.6M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (6m) 17s ago 6m 49.6M 4096M 17.2.0 e1d6a67b021e dc86a99a0403 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (6m) 17s ago 6m 50.8M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (5m) 42s ago 5m 53.2M 4096M 17.2.0 e1d6a67b021e 76735d749d5c 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (5m) 42s ago 5m 52.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (5m) 42s ago 5m 50.3M 4096M 17.2.0 e1d6a67b021e d485462ed497 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (5m) 42s ago 5m 50.4M 4096M 17.2.0 e1d6a67b021e 9271ca589720 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 starting - - - - 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (4m) 17s ago 4m 92.7M - 17.2.0 e1d6a67b021e a1037186db7f 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (4m) 42s ago 4m 93.9M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (4m) 17s ago 4m 93.2M - 17.2.0 e1d6a67b021e 3ac258c6b805 2026-03-10T09:53:34.502 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (4m) 42s ago 4m 90.7M - 17.2.0 e1d6a67b021e 5cd6eb4d6619 2026-03-10T09:53:34.793 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 
(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15, 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:53:34.794 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "mgr" 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "2/23 daemons upgraded", 2026-03-10T09:53:35.017 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading mgr daemons", 2026-03-10T09:53:35.018 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T09:53:35.018 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 
ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.051 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/4174909456' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.052 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:34 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 
ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:35.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/4174909456' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4174909456' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:35.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:34 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:35.283 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='client.24968 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='client.24974 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='client.25090 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/829893406' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:35 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='client.24968 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='client.24974 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='client.25090 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/829893406' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='client.24968 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='client.24974 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='client.25090 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/829893406' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
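The repeated "orch upgrade status" and "orch ps" dispatches above come from the test harness polling the orchestrator while it waits for the upgrade to converge. A minimal sketch of such a poll, assuming only the stock ceph CLI plus jq on a host with an admin keyring (an illustration, not the harness's actual code):

    # Poll until the orchestrator reports the upgrade is no longer running;
    # "ceph orch upgrade status" returns JSON that includes an "in_progress" field.
    while ceph orch upgrade status --format json | jq -e '.in_progress' >/dev/null; do
        sleep 30
    done
    ceph orch ps    # then confirm every daemon is on the target version
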
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:35 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:37.024 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='client.25102 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
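The CephNodeDiskspaceWarning failure logged above is a PromQL join error rather than a disk problem: node_uname_info exists twice for instance "vm03" (once with a cluster label, once without), so the on (instance) group_left (nodename) match is no longer many-to-one. One way to hand-check a deduplicated variant of the rule's expression, assuming promtool is installed and Prometheus answers on localhost:9090 (a sketch, not the shipped alert definition):

    # Aggregate away the duplicate node_uname_info series before the join,
    # keeping only the labels the alert actually uses (instance, nodename).
    promtool query instant http://localhost:9090 \
      'predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
         * on (instance) group_left (nodename)
           (max by (instance, nodename) (node_uname_info)) < 0'
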
2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:36 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='client.25102 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: 
from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
09:53:36 vm00 ceph-mon[56720]: from='client.25102 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:36 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
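The lines above are the upgrade's holding pattern: client.admin polls "orch upgrade status" while every mon keeps relaying "Metadata not up to date on all hosts. Skipping non agent specs". With the agent enabled for this job (mgr/cephadm/use_agent: true), that message appears to come from the cephadm mgr module's serve loop when it has not yet received fresh metadata from every host's agent, so it defers applying non-agent service specs for that pass. A cluster-side cross-check of the same wait, as a sketch, run from cephadm shell on one of the mons:

    ceph orch upgrade status
    ceph orch host ls
    ceph versions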
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:38.179 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:38 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:38.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:38.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:38.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:38.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:38.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:38.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:38.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:38.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:38 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
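Each dispatch burst from mgr.x repeats the same read-only cycle against whichever mon it reaches: config dump, versions, config generate-minimal-conf, and auth get for client.admin, with an occasional osd blocklist ls. This is consistent with the mgr regenerating the minimal ceph.conf and admin keyring that cephadm distributes to hosts while the upgrade waits. The same information can be pulled by hand; a sketch of the equivalent CLI calls:

    ceph config dump --format json
    ceph versions
    ceph config generate-minimal-conf
    ceph auth get client.admin
    ceph osd blocklist ls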
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:39.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:39.172 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:39 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:53:38 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:53:38] "GET /metrics HTTP/1.1" 200 37772 "" "Prometheus/2.51.0"
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
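Interleaved with the polling, the pgmap ticks (v33 through v35 here) show the cluster holding steady at 161 pgs active+clean with 102 MiB used of 160 GiB, and Prometheus 2.51.0 scraping the mgr's /metrics endpoint successfully (HTTP 200, 37772 bytes). A health spot-check for the same window might look like the sketch below; 9283 is the mgr prometheus module's default port, and vm03 as the scrape target is an assumption read off the journalctl@ceph.mgr.x.vm03 line above:

    ceph -s
    ceph pg stat
    curl -s http://vm03:9283/metrics | head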
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:39.307 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:39 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:40.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:40.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:40.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:40.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:40.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:40.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:40.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:40 vm03 ceph-mon[50536]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Skipping non agent specs 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:40 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:41 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:41 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
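The four commands in every cycle above ("config dump", "versions", "config generate-minimal-conf", "auth get client.admin") are consistent with a periodic cluster-state refresh by the active mgr, and the paired "Metadata not up to date on all hosts. Skipping non agent specs" message indicates the cephadm module is still waiting on per-host agent metadata before applying non-agent service specs. To replay the same four mon commands by hand while debugging, a minimal sketch using the stock ceph CLI (assumes admin credentials on the host; the `ceph()` helper is ours, not teuthology's):

    import json
    import subprocess

    def ceph(*args):
        """Run a `ceph` CLI command and return its stdout as text."""
        return subprocess.run(["ceph", *args], check=True,
                              capture_output=True, text=True).stdout

    # The same four mon commands mgr.x dispatches in each cycle of the log.
    config_dump  = json.loads(ceph("config", "dump", "--format", "json"))
    versions     = json.loads(ceph("versions"))             # JSON by default
    minimal_conf = ceph("config", "generate-minimal-conf")  # ini-style text
    admin_auth   = ceph("auth", "get", "client.admin")      # keyring text

    print(f"{len(config_dump)} config options; mon versions: {versions['mon']}")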
[... the dispatch cycle continues on mon.a (vm00) into 09:53:42, where an iSCSI gateway client also checks in and the pgmap advances ...]
2026-03-10T09:53:42.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:42 vm00 ceph-mon[52384]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:53:42.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:42 vm00 ceph-mon[52384]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
[... the same "service status" dispatch and pgmap v36 report are mirrored by mon.c (vm00) and mon.b (vm03) at 09:53:42, interleaved with further repeats of the five-message dispatch cycle ...]
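The pgmap summaries are the only state that changes across this stretch (v35 to v36: reads drop from 1.2 KiB/s to 852 B/s while all 161 PGs stay active+clean), so when scanning a long run it helps to extract just those lines. A sketch matching the mon log format shown above (the regex and field names are ours, not teuthology's):

    import re

    # Matches the mon's pgmap summary lines, e.g.
    # "pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, ..."
    PGMAP_RE = re.compile(
        r"pgmap v(?P<version>\d+): (?P<pgs>\d+) pgs: (?P<states>[^;]+); "
        r"(?P<data>\S+ \S+) data, (?P<used>\S+ \S+) used, "
        r"(?P<avail>\S+ \S+) / (?P<total>\S+ \S+) avail"
    )

    def parse_pgmap(line):
        """Return the pgmap fields from a journalctl line, or None if absent."""
        m = PGMAP_RE.search(line)
        return m.groupdict() if m else None

    line = ("Mar 10 09:53:42 vm00 ceph-mon[52384]: pgmap v36: 161 pgs: "
            "161 active+clean; 457 KiB data, 102 MiB used, "
            "160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s")
    assert parse_pgmap(line)["version"] == "36"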
Skipping non agent specs
2026-03-10T09:53:42.384 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:42 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:42.384 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:42 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:42.384 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:42 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:42.384 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:42 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:42.384 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:42 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:43 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:43.466 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:43 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:43.519Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:43.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:43.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:43.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:44 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.300 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:44.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:44 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:45.269 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.270 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:45 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:45 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:46.225 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:46.225 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:46.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:46.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:46.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:46.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:46.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:46 vm03 ceph-mon[50536]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:46.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:46.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:46.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:46.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:46.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:46.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[52384]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:46.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:46 vm00 ceph-mon[56720]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:47.047 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:46.953Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
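The CephNodeDiskspaceWarning failure above is Prometheus rejecting a many-to-many vector match: two node_uname_info series exist for instance="vm03" (one carrying a cluster label, one without, i.e. the node is being scraped by two jobs), so the "* on (instance) group_left (nodename)" join has no unique right-hand side. A minimal sketch of a possible rewrite, which collapses the right-hand side before matching and assumes the duplicate series differ only in extra labels such as cluster (this is illustrative, not the rule actually shipped in ceph_alerts.yml):

    predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
      * on (instance) group_left (nodename)
        max by (instance, nodename) (node_uname_info) < 0

The max by (instance, nodename) aggregation keeps nodename available for group_left while guaranteeing a single right-hand series per instance.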
2026-03-10T09:53:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
2026-03-10T09:53:47.408 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:47 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:47.408 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:47 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:47.408 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:47 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:47.408 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:47 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:47.408 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:47 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:47.408 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:47 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[52384]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:48.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:48 vm00 ceph-mon[56720]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:48.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:48 vm03 ceph-mon[50536]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:49.109 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:53:48 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:53:48] "GET /metrics HTTP/1.1" 200 37772 "" "Prometheus/2.51.0"
2026-03-10T09:53:49.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:49.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:49.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:49 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
Skipping non agent specs 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:49.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:49 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.111 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.272 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.273 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:50 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:50.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:50 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:51 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.438 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:51.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:51 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
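Buried mid-line above is the one real health signal in this window: the pgmap tick (v41) reporting 161 pgs, all active+clean, 102 MiB used of 160 GiB, and read-only client traffic. In other words, the cluster is healthy and idle while the orchestrator waits on agent metadata. The same figures could be read on demand (standard commands):

    ceph pg stat   # one-line pgmap summary, same numbers as the periodic tick
    ceph -s        # full status; includes the pgmap line plus health and services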
Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
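A different client also appears above: entity='client.iscsi.foo.vm00.dqkdwh' dispatching a "service status" command. That is the iSCSI gateway daemon deployed earlier in the job publishing its liveness into the cluster service map; it is routine registration traffic, not an error. What such daemons have reported can be inspected directly (standard mon commands):

    ceph service status   # per-service summary of registered client daemons
    ceph service dump     # the raw service map as JSON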
Skipping non agent specs 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:53:52.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:52 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.409 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:53:52.410 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:52 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
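Note the one extra command that closes each polling cycle here: mgr.x also dispatches "osd blocklist ls". The orchestrator reads the OSD blocklist as part of its periodic refresh, and an empty list is expected on a healthy cluster. The same data is available from the CLI (the command replaced the older "blacklist" spelling in recent releases):

    ceph osd blocklist ls   # prints one '<addr> <expiry>' entry per blocklisted client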
Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:53.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:53.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:53 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:53.522 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:53.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:53.522 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:53.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
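The two dispatcher errors a few entries up are the first genuine failures in this window: Alertmanager cancelled its notifications to the dashboard webhook after 7 and 8 attempts because host.containers.internal does not resolve through the nodes' DNS (192.168.123.1:53). That hostname is a container-runtime alias for the host machine, and nothing on these VPS guests answers for it, so every dashboard notification will fail the same way; alert evaluation inside the cluster is otherwise unaffected. Reconstructed from the error text, the receiver in play has roughly this shape (illustrative only; the alertmanager.yml that cephadm actually generates may differ in layout and TLS options):

    # Illustrative Alertmanager receiver implied by the error above -- the URL
    # is taken verbatim from the log, the surrounding structure is assumed.
    receivers:
      - name: 'ceph-dashboard'
        webhook_configs:
          - url: 'https://host.containers.internal:8443/api/prometheus_receiver'

A quick sanity check from either VM would confirm the diagnosis: getent hosts host.containers.internal returns nothing, matching the "no such host" in the error.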
Skipping non agent specs 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' 2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:53.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:53 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:53.521Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:53.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:53:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:53:53.521Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:53:54.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
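The rule-manager warning above deserves unpacking, since it will recur on every evaluation while the condition persists. CephOSDFlapping joins rate(ceph_osd_up[5m]) onto ceph_osd_metadata with on (ceph_daemon) group_left (hostname), and group_left requires exactly one right-hand series per ceph_daemon. The error shows two ceph_osd_metadata series for osd.0 that differ only in their instance label ("ceph_cluster" versus 192.168.123.103:9283) plus an extra cluster label on one of them: the same mgr metrics are being scraped under two target configurations at once, so the join is many-to-many and the rule cannot evaluate. One conventional way to make such a join unambiguous is to collapse the duplicates before joining (an illustrative PromQL rewrite, not the rule Ceph ships):

    # Illustrative only: aggregate the metadata so each ceph_daemon is unique
    # on the right-hand side of the group_left join.
    (rate(ceph_osd_up[5m])
       * on (ceph_daemon) group_left (hostname)
         max by (ceph_daemon, hostname) (ceph_osd_metadata)
    ) * 60 > 1

In a run like this, the cleaner fix is on the scrape side: once only one Prometheus target exports ceph_osd_metadata per cluster, the shipped rule evaluates as written.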
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:54 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:54.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:54 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:55 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:53:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:55 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:53:56.163 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:56 vm03 ceph-mon[50536]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:56 vm00 ceph-mon[52384]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:56 vm00 ceph-mon[56720]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:53:57.049 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:53:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:53:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:53:58 vm03 ceph-mon[50536]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[52384]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:53:58.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:53:58 vm00 ceph-mon[56720]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:53:59.297 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:53:58 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:53:58] "GET /metrics HTTP/1.1" 200 37771 "" "Prometheus/2.51.0"
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: Detected new or changed devices on vm03
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: Failing over to other MGR
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[56720]: osdmap e95: 8 total, 8 up, 8 in
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: Detected new or changed devices on vm03
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:54:00.305 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:54:00.306 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: Failing over to other MGR
2026-03-10T09:54:00.306 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:54:00.306 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:54:00.306 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:00 vm00 ceph-mon[52384]: osdmap e95: 8 total, 8 up, 8 in
2026-03-10T09:54:00.306 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:00] ENGINE Bus STOPPING
2026-03-10T09:54:00.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: Detected new or changed devices on vm03
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x'
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: Upgrade: Need to upgrade myself (mgr.x)
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: Failing over to other MGR
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 192.168.123.103:0/791981711' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mon[50536]: osdmap e95: 8 total, 8 up, 8 in
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:00.080+0000 7f3c56139640 -1 mgr handle_mgr_map I was active but no longer am
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ignoring --setuser ceph since I am not root
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ignoring --setgroup ceph since I am not root
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-mgr[71679]: -- 192.168.123.103:0/4058991006 <== mon.2 v2:192.168.123.103:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55dff29f74a0 con 0x55dff29d5000
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:00.221+0000 7f8112d93140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:54:00.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:00.268+0000 7f8112d93140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:54:00.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:00] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:54:00.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:00] ENGINE Bus STOPPED
2026-03-10T09:54:00.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:00] ENGINE Bus STARTING
2026-03-10T09:54:01.047 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:00 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:00.769+0000 7f8112d93140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:54:01.057 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:00] ENGINE Serving on http://:::9283
2026-03-10T09:54:01.057 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:00] ENGINE Bus STARTED
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: mgrmap e38: y(active, starting, since 0.929192s)
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: Manager daemon y is now available
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:01.175+0000 7f8112d93140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:54:01.330 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-10T09:54:01.331 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: from numpy import show_config as show_numpy_config
2026-03-10T09:54:01.331 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:01.283+0000 7f8112d93140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:54:01.331 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:01.328+0000 7f8112d93140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: mgrmap e38: y(active, starting, since 0.929192s)
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:54:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: Manager daemon y is now available
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.24833 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: mgrmap e38: y(active, starting, since 0.929192s)
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":
"mon metadata"}]: dispatch 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: Manager daemon y is now available 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:54:01.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:54:01.762 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:01 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:01.414+0000 7f8112d93140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:54:02.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[52384]: Reconfiguring daemon agent.vm03 on vm03 2026-03-10T09:54:02.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[52384]: mgrmap e39: y(active, since 1.93562s) 2026-03-10T09:54:02.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[52384]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:02.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:02.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[56720]: Reconfiguring daemon agent.vm03 on vm03 2026-03-10T09:54:02.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[56720]: mgrmap e39: y(active, since 1.93562s) 2026-03-10T09:54:02.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[56720]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:02.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:02.431 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:02 vm03 ceph-mon[50536]: Reconfiguring daemon agent.vm03 on vm03 2026-03-10T09:54:02.431 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:02 vm03 ceph-mon[50536]: mgrmap e39: y(active, since 1.93562s) 2026-03-10T09:54:02.431 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:02 vm03 ceph-mon[50536]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:02.431 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:02.699 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:02 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:02.452+0000 7f8112d93140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:54:03.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:02 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:02.765+0000 7f8112d93140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:54:03.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:02 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:02.919+0000 7f8112d93140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:54:03.253 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:03 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:03.020+0000 7f8112d93140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:54:03.254 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:03 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:03.132+0000 7f8112d93140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:54:03.254 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: [10/Mar/2026:09:54:01] ENGINE Bus STARTING 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: [10/Mar/2026:09:54:01] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: [10/Mar/2026:09:54:01] ENGINE Client ('192.168.123.100', 51562) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: [10/Mar/2026:09:54:01] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: [10/Mar/2026:09:54:01] ENGINE Bus STARTED 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: Cluster is now healthy 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:03.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:03 vm03 ceph-mon[50536]: mgrmap e40: y(active, since 3s) 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: [10/Mar/2026:09:54:01] ENGINE Bus STARTING 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: [10/Mar/2026:09:54:01] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: [10/Mar/2026:09:54:01] ENGINE Client ('192.168.123.100', 51562) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: [10/Mar/2026:09:54:01] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: [10/Mar/2026:09:54:01] ENGINE Bus STARTED 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: Cluster is now healthy 2026-03-10T09:54:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[52384]: mgrmap e40: y(active, since 3s) 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: from='client.24899 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: [10/Mar/2026:09:54:01] ENGINE Bus STARTING 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: [10/Mar/2026:09:54:01] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: [10/Mar/2026:09:54:01] ENGINE Client ('192.168.123.100', 51562) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: [10/Mar/2026:09:54:01] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: [10/Mar/2026:09:54:01] ENGINE Bus STARTED 2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline)
2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: Cluster is now healthy
2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:03 vm00 ceph-mon[56720]: mgrmap e40: y(active, since 3s)
2026-03-10T09:54:03.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:03 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:03.252+0000 7f8112d93140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:54:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:03.521Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:03.522Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:03.522Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:03.522Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:03.899 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:03 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:03.742+0000 7f8112d93140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:54:03.899 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:03 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:03.894+0000 7f8112d93140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
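The Alertmanager failures above all reduce to name resolution: the ceph-dashboard webhook receiver is configured as https://host.containers.internal:8443/api/prometheus_receiver, and the resolver at 192.168.123.1:53 has no record for host.containers.internal. A minimal Python sketch to reproduce the lookup step from an affected node; the host and port come from the log, the script itself is illustrative and not part of the test suite:

    import socket

    # Receiver endpoint as reported in the alertmanager errors above.
    HOST, PORT = "host.containers.internal", 8443

    try:
        # getaddrinfo performs the same lookup that "dial tcp" failed on.
        for *_, addr in socket.getaddrinfo(HOST, PORT, proto=socket.IPPROTO_TCP):
            print(f"resolved {HOST} -> {addr[0]}")
    except socket.gaierror as exc:
        # Corresponds to the "no such host" error in the log.
        print(f"lookup failed for {HOST}: {exc}")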
failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:54:04.671 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:04 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:04.567+0000 7f8112d93140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:05 vm03 ceph-mon[50536]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:05 vm03 ceph-mon[50536]: mgrmap e41: y(active, since 4s) 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:05 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:05 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:05 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:05 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:05.256 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.210+0000 7f8112d93140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:54:05.513 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.285+0000 7f8112d93140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:54:05.513 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.395+0000 
2026-03-10T09:54:05.520 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[52384]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[52384]: mgrmap e41: y(active, since 4s)
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[56720]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[56720]: mgrmap e41: y(active, since 4s)
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:05 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:05.521 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:54:05.766 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.588+0000 7f8112d93140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:54:05.766 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.671+0000 7f8112d93140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 8s ago 7m - -
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 1s ago 7m - -
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (105s) 8s ago 5m 25.8M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (89s) 1s ago 5m 46.8M - dad864ee21e9 011f2081bf92
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (65s) 8s ago 5m 44.0M - 3.5 e1d6a67b021e 798c3470c0d4
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283 running (115s) 1s ago 7m 218M - 19.2.3-678-ge911bdeb 654f31e6858e 2d74d7e9d583
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (53s) 8s ago 8m 487M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (8m) 8s ago 8m 59.7M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (7m) 1s ago 7m 50.4M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (7m) 8s ago 7m 47.1M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (102s) 8s ago 5m 9239k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (96s) 1s ago 5m 9487k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (7m) 8s ago 7m 56.1M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (6m) 8s ago 6m 53.7M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (6m) 8s ago 6m 49.7M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (6m) 8s ago 6m 50.9M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (6m) 1s ago 6m 54.2M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (6m) 1s ago 6m 53.2M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (6m) 1s ago 6m 51.2M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (6m) 1s ago 6m 51.6M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (67s) 1s ago 5m 40.5M - 2.51.0 1d3b7f56885b 983b87a269f7
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (5m) 8s ago 5m 92.9M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (5m) 1s ago 5m 94.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (5m) 8s ago 5m 93.5M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:54:05.945 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (5m) 1s ago 5m 91.2M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:54:06.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.763+0000 7f8112d93140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:54:06.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:05 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:05.896+0000 7f8112d93140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15,
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:54:06.208 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "mgr"
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "2/23 daemons upgraded",
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "message": "",
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:54:06.430 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:54:06.457 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:06.050+0000 7f8112d93140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:54:06.457 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: 2026-03-10T09:54:06.096+0000 7f8112d93140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:54:06.457 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:54:06] ENGINE Bus STARTING
2026-03-10T09:54:06.457 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: CherryPy Checker:
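The two JSON blobs above capture the mid-upgrade state: ceph versions still shows 15 daemons on 17.2.0 quincy with only the upgraded mgr on squid, while ceph orch upgrade status reports 2/23 daemons done and the mgr service complete. A sketch of the kind of polling loop the wait step implies, using the same field names as the status output above; the wrapper itself is illustrative, not the test's actual code:

    import json
    import subprocess
    import time

    def upgrade_status() -> dict:
        # Same command the clients below dispatch ("orch upgrade status").
        out = subprocess.check_output(
            ["ceph", "orch", "upgrade", "status", "--format", "json"])
        return json.loads(out)

    status = upgrade_status()
    while status.get("in_progress") and not status.get("is_paused"):
        # "progress" and "services_complete" as seen in the log output.
        print(status.get("progress"), "complete:", status.get("services_complete"))
        time.sleep(30)
        status = upgrade_status()

    print("upgrade no longer in progress:", json.dumps(status))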
2026-03-10T09:54:06.457 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: The Application mounted at '' has an empty config.
2026-03-10T09:54:06.458 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]:
2026-03-10T09:54:06.458 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:54:06] ENGINE Serving on http://:::9283
2026-03-10T09:54:06.458 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: [10/Mar/2026:09:54:06] ENGINE Bus STARTED
2026-03-10T09:54:06.719 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK
2026-03-10T09:54:07.052 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='client.25138 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='client.25144 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='client.15252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: Standby manager daemon x started
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='client.?
192.168.123.100:0/3237305759' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:07.053 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4275075611' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:54:07.200 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:54:07.200 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='client.25138 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='client.25144 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='client.15252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": 
["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: Standby manager daemon x started 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3237305759' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:07.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:07 vm03 ceph-mon[50536]: from='client.? 
192.168.123.100:0/4275075611' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:54:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='client.25138 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='client.25144 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='client.15252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: Standby manager daemon x started 2026-03-10T09:54:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/3197002420' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3237305759' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:07 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/4275075611' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='client.15264 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: mgrmap e42: y(active, since 7s), standbys: x 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='client.15264 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:08.377 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: mgrmap e42: y(active, since 7s), standbys: x 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:08 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='client.15264 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating 
vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: mgrmap e42: y(active, since 7s), standbys: x 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:08 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:54:08.919 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:54:08.839+0000 7f8abf377640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ... 2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Creating ceph-iscsi config... 
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Traceback (most recent call last):
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: return _run_code(code, main_globals, None,
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: exec(code, run_globals)
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Traceback (most recent call last):
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: return self.wait_async(
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: return future.result(timeout)
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: return self.__get_result()
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: raise self._exception
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: out, err, code = await self._run_cephadm(
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: raise OrchestratorError(
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Creating ceph-iscsi config...
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Traceback (most recent call last):
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: return _run_code(code, main_globals, None,
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: exec(code, run_globals)
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:08.920 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:08.921 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:08.921 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
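[Editor's sketch: the failed-unit diagnosis the log itself points at. A minimal set of commands, assuming shell access to vm00; the unit name and both commands are copied verbatim from the systemctl stderr records above, not invented here.]
    # Why did the iscsi daemon's systemd unit fail to restart?
    systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service
    journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service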
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
2026-03-10T09:54:09.191 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:54:09.192 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:09.192 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:54:09.192 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:54:09.192 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:09.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:54:09.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)...
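[Editor's sketch: the dispatch records above show the caps the mgr requested for the iSCSI gateway's client entity. A minimal check of what actually got stored, assuming a working admin keyring on a cluster host; `ceph auth get` is a standard Ceph command and the entity name is copied from the record above.]
    # Print the key and caps recorded for the iSCSI gateway client
    ceph auth get client.iscsi.foo.vm00.dqkdwh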
2026-03-10T09:54:09.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T09:54:09.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:09.226 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00
2026-03-10T09:54:09.227 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:54:09.227 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:09.227 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[71675]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:54:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.51.0"
2026-03-10T09:54:09.509 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 systemd[1]: Stopping Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..."
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..."
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=main.go:984 level=info msg="Scrape discovery manager stopped"
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..."
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=main.go:998 level=info msg="Notify discovery manager stopped"
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped"
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.508Z caller=main.go:1039 level=info msg="Stopping scrape manager..."
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.509Z caller=main.go:1031 level=info msg="Scrape manager stopped"
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.512Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..."
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.512Z caller=main.go:1261 level=info msg="Notifier manager stopped"
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[78938]: ts=2026-03-10T09:54:09.512Z caller=main.go:1273 level=info msg="See you next time!"
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 podman[97691]: 2026-03-10 09:54:09.520009006 +0000 UTC m=+0.033430240 container died 983b87a269f733bba7b2f3bcee20c09ac626f8ce733dd5778f4fbfb4589cb617 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 podman[97691]: 2026-03-10 09:54:09.535929969 +0000 UTC m=+0.049351214 container remove 983b87a269f733bba7b2f3bcee20c09ac626f8ce733dd5778f4fbfb4589cb617 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 bash[97691]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service: Deactivated successfully.
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 systemd[1]: Stopped Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 systemd[1]: Starting Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:54:09.761 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 podman[97756]: 2026-03-10 09:54:09.728099121 +0000 UTC m=+0.022325760 container create fbf1a95f0e5596e8e5da32b49fc8bcd5d6cc5d95f71ea763564bfd18099e7c87 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 podman[97756]: 2026-03-10 09:54:09.769410768 +0000 UTC m=+0.063637407 container init fbf1a95f0e5596e8e5da32b49fc8bcd5d6cc5d95f71ea763564bfd18099e7c87 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 podman[97756]: 2026-03-10 09:54:09.773162841 +0000 UTC m=+0.067389480 container start fbf1a95f0e5596e8e5da32b49fc8bcd5d6cc5d95f71ea763564bfd18099e7c87 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 bash[97756]: fbf1a95f0e5596e8e5da32b49fc8bcd5d6cc5d95f71ea763564bfd18099e7c87
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 podman[97756]: 2026-03-10 09:54:09.717949638 +0000 UTC m=+0.012176277 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 systemd[1]: Started Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.813Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.813Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.813Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm03 (none))"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.813Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.813Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.815Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.818Z caller=main.go:1129 level=info msg="Starting TSDB ..."
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.819Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.819Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.820Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.820Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.082µs
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.820Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.833Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=4
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.839Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=4
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.839Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=4
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.845Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=4
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.845Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=4
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.845Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=28.072µs wal_replay_duration=25.574519ms wbl_replay_duration=150ns total_replay_duration=25.694463ms
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.849Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.849Z caller=main.go:1153 level=info msg="TSDB started"
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.849Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.859Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=10.555232ms db_storage=1.603µs remote_storage=1.533µs web_handler=491ns query_engine=752ns scrape=695.732µs scrape_sd=120.537µs notify=9.378µs notify_sd=7.133µs rules=9.20197ms tracing=8.928µs
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.859Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
2026-03-10T09:54:10.048 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:09.859Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."
2026-03-10T09:54:10.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:09] ENGINE Bus STOPPING
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals)
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:10.421 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.wait_async(
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: return future.result(timeout)
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.__get_result()
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: raise self._exception
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: out, err, code = await self._run_cephadm(
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: raise OrchestratorError(
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals)
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.422 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals)
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.wait_async(
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: return future.result(timeout)
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.__get_result()
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: raise self._exception
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: out, err, code = await self._run_cephadm(
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: raise OrchestratorError(
2026-03-10T09:54:10.423 INFO:journalctl@ceph.mon.c.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config...
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals)
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: Reconfiguring prometheus.a (dependencies changed)...
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:10] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:10] ENGINE Bus STOPPED
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:10] ENGINE Bus STARTING
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:10] ENGINE Serving on http://:::9283
2026-03-10T09:54:10.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:54:10] ENGINE Bus STARTED
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: Creating ceph-iscsi config...
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: exec(code, run_globals)
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: self.mgr._daemon_action(daemon_spec, action=action)
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: return self.wait_async(
2026-03-10T09:54:10.491 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: return self.event_loop.get_result(coro, timeout)
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: return future.result(timeout)
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: return self.__get_result()
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: raise self._exception
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: out, err, code = await self._run_cephadm(
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: raise OrchestratorError(
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: Creating ceph-iscsi config...
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: Non-zero exit code 1 from systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: Traceback (most recent call last):
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: return _run_code(code, main_globals, None,
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: exec(code, run_globals)
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: RuntimeError: Failed command: systemctl restart ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout: See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: Reconfiguring prometheus.a (dependencies changed)...
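The inner traceback (echoed here a second time because the mon relays the cephadm stderr) bottoms out in cephadmlib/call_wrappers.py: call_throws raises RuntimeError('Failed command: ...') when the wrapped command exits non-zero, which is how the systemctl restart failure becomes the exception text above. A minimal sketch of a call_throws-style wrapper, assuming plain subprocess semantics (simplified illustration, not the actual cephadm code):

import subprocess
from typing import List, Tuple

def call_throws(cmd: List[str], timeout: int = 60) -> Tuple[str, str, int]:
    # Run the command, capture output, and raise on non-zero exit so the
    # caller gets a message shaped like the log line above:
    #   RuntimeError: Failed command: systemctl restart <unit>: <stderr>
    proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
    if proc.returncode != 0:
        raise RuntimeError('Failed command: %s: %s'
                           % (' '.join(cmd), proc.stderr.strip()))
    return proc.stdout, proc.stderr, proc.returncode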
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: Reconfiguring daemon prometheus.a on vm03
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:10.492 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:10.767 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 systemd[1]: Stopping Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
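The dashboard get/set-prometheus-api-host dispatches above are ordinary mon commands issued by the mgr after prometheus.a was reconfigured on vm03. For reference, the same payload can be sent from Python through the rados binding; a sketch assuming python3-rados is installed and a local ceph.conf plus admin keyring are available (hypothetical diagnostic, not part of the test run):

import json
import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()
try:
    # Same JSON shape as the cmd=[{...}] entries in the dispatch records.
    cmd = json.dumps({'prefix': 'dashboard set-prometheus-api-host',
                      'value': 'http://vm03.local:9095'})
    ret, outbuf, outs = cluster.mon_command(cmd, b'')
    print(ret, outs)
finally:
    cluster.shutdown()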
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 podman[98009]: 2026-03-10 09:54:10.887259515 +0000 UTC m=+0.048729099 container died 2d74d7e9d58313aeb78de5c1565c04b64e95c3c881307e254e9858120fb7d1e1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 podman[98009]: 2026-03-10 09:54:10.913860664 +0000 UTC m=+0.075330248 container remove 2d74d7e9d58313aeb78de5c1565c04b64e95c3c881307e254e9858120fb7d1e1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team )
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 bash[98009]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Main process exited, code=exited, status=143/n/a
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Failed with result 'exit-code'.
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 systemd[1]: Stopped Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:54:11.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:10 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Consumed 1min 7.417s CPU time.
2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 9 op/s
2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.'
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: Upgrade: Updating mgr.x 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: Deploying daemon mgr.x on vm03 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.338 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:11 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.339 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 systemd[1]: Starting Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: Upgrade: Updating mgr.x 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: Deploying daemon mgr.x on vm03 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: Upgrade: Updating mgr.x 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:54:11.589 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:54:11.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:11.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: Deploying daemon mgr.x on vm03 2026-03-10T09:54:11.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:11 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:11.639 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 podman[98119]: 2026-03-10 09:54:11.338110146 +0000 UTC m=+0.027757085 container create a32377ac0ce6d05bbbddbd67682a592a437e203e2f902439568d93e5bd7609e7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, vcs-type=git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, com.redhat.component=centos-stream-container, build-date=2022-05-03T08:36:31.336870, vendor=Red Hat, Inc., ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, distribution-scope=public, release=754, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.tags=base centos centos-stream, io.openshift.expose-services=, GIT_BRANCH=HEAD) 2026-03-10T09:54:11.639 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 podman[98119]: 2026-03-10 09:54:11.326907031 +0000 UTC m=+0.016553970 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a 2026-03-10T09:54:11.640 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 podman[98119]: 2026-03-10 09:54:11.440981903 +0000 UTC m=+0.130628851 container init a32377ac0ce6d05bbbddbd67682a592a437e203e2f902439568d93e5bd7609e7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, io.buildah.version=1.19.8, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, release=754, ceph=True, RELEASE=HEAD, io.openshift.expose-services=, version=8, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870) 2026-03-10T09:54:11.640 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 podman[98119]: 2026-03-10 09:54:11.445644097 +0000 UTC m=+0.135291026 container start a32377ac0ce6d05bbbddbd67682a592a437e203e2f902439568d93e5bd7609e7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, release=754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, distribution-scope=public, RELEASE=HEAD, vcs-type=git, maintainer=Guillaume Abrioux , io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8) 2026-03-10T09:54:11.640 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 bash[98119]: a32377ac0ce6d05bbbddbd67682a592a437e203e2f902439568d93e5bd7609e7 2026-03-10T09:54:11.640 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 systemd[1]: Started Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:54:11.640 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:11.569+0000 7f8ffce21000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:54:12.048 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:11.637+0000 7f8ffce21000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:54:12.509 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:12.088+0000 7f8ffce21000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:54:12.797 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:12.507+0000 7f8ffce21000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:54:12.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:12.656+0000 7f8ffce21000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T09:54:12.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:12.721+0000 7f8ffce21000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)... 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: Cluster is now healthy 2026-03-10T09:54:12.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:12 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)... 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: Cluster is now healthy 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: Reconfiguring iscsi.foo.vm00.dqkdwh (dependencies changed)... 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: Reconfiguring daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: Cluster is now healthy 2026-03-10T09:54:12.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:12 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.298 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:12.902+0000 7f8ffce21000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 12 op/s 
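The burst of "Module <name> has missing NOTIFY_TYPES member" messages comes from the redeployed mgr.x loading its Python modules: the mgr checks each module for a NOTIFY_TYPES declaration listing which notify events it consumes, and logs this (harmless) warning for modules that do not declare one. A hedged sketch of what such a declaration looks like in a mgr module; it only imports inside the ceph-mgr runtime, and the chosen types are illustrative:

from mgr_module import MgrModule, NotifyType  # available inside ceph-mgr only

class Example(MgrModule):
    # Declaring the consumed notify types silences the startup warning.
    NOTIFY_TYPES = [NotifyType.mon_map, NotifyType.pg_summary]

    def notify(self, notify_type: NotifyType, notify_id: str) -> None:
        # Only the declared types are delivered here.
        self.log.debug('notify %s %s', notify_type, notify_id)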
2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 742 B/s rd, 0 op/s 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/4133621909' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
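Worth decoding from the dispatch records: the "auth get-or-create" for client.iscsi.foo.vm00.dqkdwh passes caps as a flat list of alternating service/value pairs, and the mon grant 'allow command "osd blocklist"' is what authorizes the "osd blocklist ls" call the gateway client makes just above. Rebuilding the same payload in Python (the values are copied from the log; the surrounding code is illustrative):

import json

cmd = {
    'prefix': 'auth get-or-create',
    'entity': 'client.iscsi.foo.vm00.dqkdwh',
    'caps': [
        'mon', ('profile rbd, allow command "osd blocklist", '
                'allow command "config-key get" with "key" prefix "iscsi/"'),
        'mgr', 'allow command "service status"',
        'osd', 'allow rwx',
    ],
}
print(json.dumps(cmd))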
2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.871 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:13.522Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:54:13.871 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:13.523Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:54:13.871 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:13.524Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:54:13.871 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:13.524Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:54:13.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 12 op/s 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 742 B/s rd, 0 op/s
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/4133621909' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 12 op/s
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 742 B/s rd, 0 op/s
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/4133621909' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.934 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:13.935 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:13.935 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:13.611+0000 7f8ffce21000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T09:54:14.264 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:13.965+0000 7f8ffce21000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:54:14.264 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:14.111+0000 7f8ffce21000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T09:54:14.550 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:14.262+0000 7f8ffce21000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T09:54:14.550 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:14.428+0000 7f8ffce21000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:14.561+0000 7f8ffce21000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.845 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:14.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:14 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.120 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:54:15.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: 
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:15.125 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:14 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:15.391 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:15.156+0000 7f8ffce21000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:54:15.391 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:15.274+0000 7f8ffce21000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Cluster is now healthy
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.053 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.054 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.055 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:15 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Cluster is now healthy
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Cluster is now healthy
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:15 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.634 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:16.439+0000 7f8ffce21000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:54:16.634 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:16.543+0000 7f8ffce21000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:54:16.921 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:16.666+0000 7f8ffce21000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:16.864+0000 7f8ffce21000 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 15 op/s
2026-03-10T09:54:16.922 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 15 op/s
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 15 op/s
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 15 op/s
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 15 op/s 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 15 op/s 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:16.923 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:16 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 
2026-03-10T09:54:17.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[52384]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:17.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:17.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[52384]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-10T09:54:17.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[52384]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:17.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:17.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[52384]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 15 op/s
2026-03-10T09:54:17.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[56720]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:17.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:17.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[56720]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-10T09:54:17.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[56720]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:17.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:17.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:16 vm00 ceph-mon[56720]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 15 op/s
2026-03-10T09:54:17.276 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:17 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:17.025+0000 7f8ffce21000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:54:17.276 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:17.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:17 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:17.273+0000 7f8ffce21000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:54:17.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:17 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:17.482+0000 7f8ffce21000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:54:17.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:17 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:17.917 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:17 vm03 ceph-mon[50536]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:17.918 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:17 vm03 ceph-mon[50536]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:17.918 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:17 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:18.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:18.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:18.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.123 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:18.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:17 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:18.168 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:18.048+0000 7f8ffce21000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:54:18.169 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:18.164+0000 7f8ffce21000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:54:18.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: [10/Mar/2026:09:54:18] ENGINE Bus STARTING 2026-03-10T09:54:18.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: CherryPy Checker: 2026-03-10T09:54:18.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: The Application mounted at '' has an empty config. 2026-03-10T09:54:18.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: 2026-03-10T09:54:18.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: [10/Mar/2026:09:54:18] ENGINE Serving on http://:::9283 2026-03-10T09:54:18.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:54:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[98129]: [10/Mar/2026:09:54:18] ENGINE Bus STARTED 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Standby manager daemon x restarted 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Standby manager daemon x started 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.032 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:18 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Standby manager daemon x restarted 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Standby manager daemon x started 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.? 
192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth 
get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Standby manager daemon x restarted 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Standby manager daemon x started 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:18 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.755 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: mgrmap e43: y(active, since 20s), standbys: x 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:19.756 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:19.757 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:19 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: mgrmap e43: y(active, since 20s), standbys: x 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: mgrmap e43: y(active, since 20s), standbys: x 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:20.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:20.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:19 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:20 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:21.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:20 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:21.123 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:54:20] "GET /metrics HTTP/1.1" 200 37854 "" "Prometheus/2.51.0" 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 13 op/s 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:21 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 13 op/s
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 13 op/s
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:22.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:22.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:22.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:22.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:22.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:22.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:22.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:21 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 13 op/s 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 13 op/s 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:23.028 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
[... the same mgr.y upgrade-check cycle repeats verbatim on mon.a (vm00) through pgmap v184; all 161 pgs remain active+clean throughout ...]
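The mgr.y records above are cephadm's upgrade loop polling the mons (config dump, versions, generate-minimal-conf, auth get) while it waits for the remaining standby mgr to move off 17.2.0. A minimal operator-side sketch of the same checks, assuming admin access on one of the test VMs; these commands are not part of this job's task list:

    ceph orch upgrade status   # target image/version and current upgrade progress
    ceph versions              # per-daemon version map, i.e. what the 'versions' dispatches return
    ceph -W cephadm            # stream the cephadm module's own progress messages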
2026-03-10T09:54:23.029 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:23.029 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[56720]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s
2026-03-10T09:54:23.029 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:22 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
[... mon.c (vm00) logs the identical upgrade-check cycle through pgmap v184 ...]
2026-03-10T09:54:23.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:22 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:23.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:22 vm03 ceph-mon[50536]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s
2026-03-10T09:54:23.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:22 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
[... mon.b (vm03) logs the identical upgrade-check cycle through pgmap v184 ...]
2026-03-10T09:54:23.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:23.524Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:23.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:23.524Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:23.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:23.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
[... the warn record above is logged twice with identical content ...]
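The alertmanager failures above are a name-resolution problem, not an upgrade problem: the containerized alertmanager tries to POST dashboard alerts to host.containers.internal:8443, but the resolver at 192.168.123.1 has no record for that name, so every notify retry fails. A hedged diagnosis sketch from vm00; the container name below is illustrative, not taken from this log:

    getent hosts host.containers.internal        # does the host itself resolve the name?
    dig @192.168.123.1 host.containers.internal  # query the resolver the container is using
    # repeat the lookup from inside the alertmanager container (name is hypothetical):
    podman exec -it <alertmanager-container> getent hosts host.containers.internal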
2026-03-10T09:54:23.980 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:23 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:23.980 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:23 vm03 ceph-mon[50536]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:23.980 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:23 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:54:23.981 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:23 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
[... mon.b (vm03) repeats the mgr.y upgrade-check cycle through pgmap v198 ...]
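The recurring "Metadata not up to date on all hosts. Skipping non agent specs" line reflects this job's agent/on facet (mgr/cephadm/use_agent: true in the overrides): cephadm defers applying non-agent service specs until every host's agent has reported fresh metadata. A hedged sketch of how that state could be inspected, assuming an admin keyring on the host; not part of the captured run:

    ceph config get mgr mgr/cephadm/use_agent   # true for this job
    ceph orch ps | grep '^agent'                # one agent daemon per host should be running
    ceph -s                                     # overall health; PGs stayed 161 active+clean in this window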
09:54:23 vm00 ceph-mon[52384]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 
192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
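The repeated "Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb" lines are the cephadm upgrade loop waiting out its first phase: the standby mgr is redeployed on the target image, the active mgr fails over to it, and only then do the mons, OSDs, and remaining services follow. The same progress can be watched from a cephadm shell with the standard upgrade commands (a hedged sketch; the exact output shape varies by release):

    ceph orch upgrade status          # target image and whether an upgrade is in progress
    ceph versions                     # daemon counts per version; the mgr bucket should leave 17.2.0 first
    ceph orch ps --daemon-type mgr    # per-daemon version, showing which mgr has been redeployed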
2026-03-10T09:54:24.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:24.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:24.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[56720]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:24.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:54:24.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[56720]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:24.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
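Every orchestrator pass in this stretch also logs "Metadata not up to date on all hosts. Skipping non agent specs": with the cephadm agent enabled, the serve loop defers applying non-agent service specs (the rgw, iscsi, and monitoring specs in this run) until fresh agent-reported metadata has arrived from every host. The agent side can be checked with standard cephadm CLI (a hedged sketch):

    ceph config get mgr mgr/cephadm/use_agent   # confirm the agent path is enabled for this run
    ceph orch ps --daemon-type agent            # one running agent per host is the expected steady state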
2026-03-10T09:54:24.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:23 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:24.230 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:24.931 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:24.932 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:24.932 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:24.932 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
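The CephOSDFlapping evaluation failure above is a PromQL many-to-many match: ceph_osd_metadata for osd.0 is present twice, once with instance="ceph_cluster" and once with instance="192.168.123.103:9283", so the on (ceph_daemon) join has a non-unique right-hand side. This is typical mid-upgrade, when the mgr metrics endpoint moves and two scrape targets briefly export the same metadata. The duplicates can be confirmed, and the join made robust, through the Prometheus HTTP API (a hedged sketch; port 9095 is the usual cephadm Prometheus binding and is an assumption here, as is the rewritten expression, which is not the shipped rule):

    # list the colliding ceph_osd_metadata series for osd.0
    curl -sG 'http://localhost:9095/api/v1/series' --data-urlencode 'match[]=ceph_osd_metadata{ceph_daemon="osd.0"}'
    # the same rule body, with duplicates collapsed before the join
    curl -sG 'http://localhost:9095/api/v1/query' --data-urlencode 'query=(rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) max by (ceph_daemon, hostname) (ceph_osd_metadata)) * 60 > 1'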
2026-03-10T09:54:24.932 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:24.932 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:24.933 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:24 vm03 ceph-mon[50536]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:25.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:25.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:24 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 
2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:25 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 
2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.166 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.167 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:26.168 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:25 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 
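The mon records above show cephadm's upgrade loop in its waiting state: one mgr daemon still reports 17.2.0 while the target build is 19.2.3-678-ge911bdeb, and host metadata has not refreshed yet, so non-agent specs are skipped. A minimal sketch of how the same state can be inspected with the standard Ceph CLI (illustrative only, not commands from the recorded run):

    # Upgrade target image and progress, as tracked by the cephadm mgr module
    ceph orch upgrade status
    # Version counts per daemon type; the mgr daemons are upgraded first
    ceph versions
    # Which mgr daemons exist, their state, and the image they run
    ceph orch ps --daemon-type mgr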
2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v239: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v240: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v241: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v242: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v243: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v244: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v245: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v246: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v247: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v248: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 15 op/s 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v249: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 15 op/s 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v250: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 16 op/s 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: pgmap v251: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 16 op/s 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
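The dispatch records above are the command cycle the active mgr (mgr.y) runs against the mons while cephadm reconciles daemons: dump the config, poll versions, render a minimal conf, and fetch a keyring. A sketch of the same four commands issued by hand, assuming a host with the client.admin keyring:

    # Cluster configuration as JSON, matching cmd=[{"prefix": "config dump", "format": "json"}]
    ceph config dump --format json
    # Per-daemon version report, matching cmd=[{"prefix": "versions"}]
    ceph versions
    # The minimal ceph.conf cephadm injects into daemon containers
    ceph config generate-minimal-conf
    # Keyring fetch, matching cmd=[{"prefix": "auth get", "entity": "client.admin"}]
    ceph auth get client.admin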
2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v239: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v240: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v241: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v242: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v243: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v244: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v245: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v246: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v247: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v248: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 15 op/s 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v249: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 15 op/s 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v250: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 16 op/s 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: pgmap v251: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 16 op/s 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
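Each pgmap record above is the mgr's periodic placement-group digest: all 161 PGs are active+clean with 457 KiB of data and 102 MiB used out of 160 GiB, so the cluster stays healthy while the upgrade waits on the mgr redeploy. The same figures can be read on demand (a sketch, assuming admin access):

    # One-line PG summary, the on-demand equivalent of the pgmap digest
    ceph pg stat
    # Full status output, including the same pgmap and usage numbers
    ceph -s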
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:27.124 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:26 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:27.230 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:26.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v239: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v240: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v241: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:27.231 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:27.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:27.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v242: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: pgmap v243: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:27.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:27.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
[mon.b repeats this cycle through pgmap v251, all 161 pgs active+clean, 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail (v248–v251 additionally report 16 KiB/s rd, 15–16 op/s client I/O), then logs several dispatch-only mgr.y cycles (config dump, versions, config generate-minimal-conf, auth get client.admin) with no pgmap updates.]
2026-03-10T09:54:28.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:27 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:28.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:27 vm00 ceph-mon[52384]: pgmap v252: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 16 op/s
2026-03-10T09:54:28.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:27 vm00 ceph-mon[52384]: pgmap v253: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 16 op/s
2026-03-10T09:54:28.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:27 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
[mon.a repeats the same upgrade-progress message, "Metadata not up to date on all hosts. Skipping non agent specs", pgmap updates v254–v269 (all 161 pgs active+clean, 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail), and the mgr.y dispatch cycles through 2026-03-10T09:54:28.123.]
[The mon.c journal (vm00, ceph-mon[56720]) and the mon.b journal (vm03, ceph-mon[50536]) echo the identical sequence for the same interval: the upgrade-progress message, pgmap v252–v269 (all 161 pgs active+clean), "Metadata not up to date on all hosts. Skipping non agent specs", and the repeated mgr.y dispatch cycles.]
2026-03-10T09:54:29.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:29.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v270: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:29.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v271: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
[mon.b repeats the same upgrade-progress message, "Metadata not up to date on all hosts. Skipping non agent specs", pgmap updates v272–v283 (all 161 pgs active+clean, 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail), and the mgr.y dispatch cycles through 2026-03-10T09:54:29.301.]
2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v284: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v285: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v286: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v287: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v288: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: pgmap v289: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: 
from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:29.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:28 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 
2026-03-10T09:54:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[52384]: pgmap v270: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:29.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
[... mon.a mirrors the same sequence as mon.b: the upgrade-version notice, the metadata notice, the four-command mgr.y cycle, and pgmap updates v271 through v289 ...]
2026-03-10T09:54:29.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:29.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[56720]: pgmap v270: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:29.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
[... mon.c mirrors the same sequence: the upgrade-version notice, the metadata notice, the four-command mgr.y cycle, and pgmap updates v271 through v289, followed by several more mgr.y command cycles ...]
2026-03-10T09:54:29.374 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:28 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
[... mon.a logs several more four-command mgr.y cycles (config dump, versions, config generate-minimal-conf, auth get client.admin) ...]
2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v290: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
[... mon.b repeats the upgrade-version notice, the metadata notice, and pgmap updates v291 through v293 ...]
2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v294: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v295: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v296: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v297: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v298: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v299: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:30.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v300: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v301: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v302: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v303: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v304: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v305: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v306: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v307: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v308: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v309: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v310: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: pgmap v311: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.301 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:29 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v290: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v291: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v292: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v293: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v294: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v295: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v296: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v297: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v298: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v299: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v300: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v301: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v302: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v303: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v304: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v305: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v306: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v307: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v308: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v309: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v310: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: pgmap v311: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.372 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v290: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v291: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v292: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v293: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v294: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v295: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v296: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v297: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v298: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v299: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v300: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v301: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v302: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v303: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v304: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v305: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v306: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v307: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v308: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v309: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v310: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: pgmap v311: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:29 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v312: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v313: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v314: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v315: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v316: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v317: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v318: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v319: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v320: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v321: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v322: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v323: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v324: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v325: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:30.913 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v326: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v327: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v328: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: pgmap v329: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:30.914 
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v312: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v313: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v314: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v315: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v316: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.914 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v317: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v318: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v319: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v320: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v321: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v322: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v323: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v324: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v325: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v326: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v327: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v328: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: pgmap v329: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:30.915 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:54:30] "GET /metrics HTTP/1.1" 200 37931 "" "Prometheus/2.51.0"
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v312: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v313: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v314: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v315: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v316: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v317: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v318: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v319: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v320: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v321: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v322: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v323: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v324: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v325: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v326: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v327: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v328: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: pgmap v329: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
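The one record above that is not a mon log is the manager's metrics endpoint answering a scrape ("GET /metrics ... Prometheus/2.51.0"): the prometheus.a daemon deployed in this job polls mgr.y for cluster metrics. A quick sanity check against the exporter looks like the sketch below; the host name comes from this job's targets, and port 9283 is an assumption based on the mgr prometheus module's usual default.

    # Hit the active mgr's Prometheus exporter directly (hypothetical host/port)
    curl -s http://vm00.local:9283/metrics | head -n 5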
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:30 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
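The bursts of config dump / versions / config generate-minimal-conf / auth get dispatches, all from='mgr.15186' entity='mgr.y', are the cephadm mgr module re-reading cluster state on each pass of its serve loop rather than anything failing. The same information can be pulled by hand with standard ceph CLI calls, e.g. (a sketch):

    ceph config dump -f json            # current centralized config
    ceph versions                       # per-daemon version breakdown
    ceph config generate-minimal-conf   # minimal ceph.conf cephadm pushes to hosts
    ceph auth get client.admin          # admin keyring cephadm distributes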
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:31.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:30 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v330: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v331: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v332: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v333: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v334: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v335: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v336: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v337: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v338: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v339: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v340: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v341: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v342: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.026 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v343: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 8.6 KiB/s rd, 8 op/s
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: pgmap v344: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v330: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v331: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v332: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v333: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v334: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v335: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v336: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v337: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v338: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.027 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v339: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v340: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v341: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v343: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 8.6 KiB/s rd, 8 op/s
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: pgmap v344: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.028 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:31 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v330: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v331: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v332: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v333: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v334: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v335: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v336: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v337: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v338: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v339: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v340: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v341: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v342: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v343: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 8.6 KiB/s rd, 8 op/s 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: pgmap v344: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 10 op/s 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.050 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:31 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v345: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 8.4 KiB/s rd, 8 op/s 2026-03-10T09:54:32.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v346: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 11 op/s 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v347: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 9.0 KiB/s rd, 8 op/s 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v348: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 11 op/s 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v349: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v350: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v351: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v352: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v353: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v354: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: pgmap v355: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.937 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v345: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 8.4 KiB/s rd, 8 op/s 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v346: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 11 op/s 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v347: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 9.0 KiB/s rd, 8 op/s 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v348: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 11 op/s 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v349: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v350: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.938 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v345: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 8.4 KiB/s rd, 8 op/s 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v346: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 11 op/s 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v347: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 9.0 KiB/s rd, 8 op/s 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v348: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 11 op/s 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v349: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v350: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v351: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v352: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v353: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.961 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v354: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: pgmap v355: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:32.962 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:32 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v351: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v352: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v353: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v354: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: pgmap v355: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:33.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:32 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:33.706 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:33.524Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:33.707 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:33.525Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:33.707 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:33.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:33.707 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:33.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:34.255 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v356: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v357: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v358: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v359: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.255 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v360: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v361: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
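[editor's note] The alertmanager errors above show the dashboard webhook receiver failing because `host.containers.internal` does not resolve from inside the container (the 192.168.123.1:53 resolver returns "no such host"); notifications are retried and eventually canceled, but cluster operation is unaffected. A minimal sketch of reproducing the failure, assuming it is run inside the alertmanager container (the hostname comes straight from the logged webhook URL; nothing else is taken from this run):

```python
#!/usr/bin/env python3
# Illustrative only: check whether the dashboard webhook target resolves.
import socket

NAME = "host.containers.internal"  # target of the ceph-dashboard webhook
try:
    addrs = {ai[4][0] for ai in socket.getaddrinfo(NAME, 8443)}
    print(f"{NAME} resolves to {sorted(addrs)}")
except socket.gaierror as exc:
    # Matches the "dial tcp: lookup ... no such host" errors logged above.
    print(f"{NAME} does not resolve: {exc}")
```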
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v362: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v363: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v364: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v365: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v366: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v367: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v368: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: pgmap v369: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.256 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:34.257 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:33 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v356: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v357: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v358: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v359: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v360: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v361: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v362: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v363: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
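[editor's note] The CephOSDFlapping "Evaluating rule failed" entry logged at 09:54:34 above is a side effect of the upgrade: Prometheus currently holds two `ceph_osd_metadata` series per OSD (one with `instance="ceph_cluster"`, one with `instance="192.168.123.103:9283"`), so the rule's `on (ceph_daemon) group_left` join becomes many-to-many. A minimal sketch of listing the offending duplicates via the Prometheus HTTP API; the endpoint URL is an assumption, not taken from this run:

```python
#!/usr/bin/env python3
# Illustrative only: find OSDs with more than one ceph_osd_metadata series,
# the condition that breaks the CephOSDFlapping rule's one-to-many join.
import collections
import json
import urllib.parse
import urllib.request

PROM = "http://vm03.local:9095"  # assumed Prometheus endpoint
query = urllib.parse.urlencode({"query": "ceph_osd_metadata"})
with urllib.request.urlopen(f"{PROM}/api/v1/query?{query}") as resp:
    result = json.load(resp)["data"]["result"]

series_per_osd = collections.Counter(
    r["metric"].get("ceph_daemon") for r in result)
for osd, count in sorted(series_per_osd.items()):
    if count > 1:
        # e.g. one series from the old "ceph_cluster" instance label and one
        # from the current mgr exporter scrape target.
        print(f"{osd}: {count} duplicate series")
```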
Skipping non agent specs 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v364: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.324 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v365: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v366: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v367: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v368: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: pgmap v369: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.325 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v356: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v357: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v358: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v359: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v360: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v361: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v362: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v363: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v364: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v365: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v366: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v367: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.326 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v368: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: pgmap v369: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:34.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:33 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v370: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v371: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v372: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v373: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v374: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v375: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v376: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v377: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v378: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.131 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v379: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v380: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: pgmap v381: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:35.132 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:35.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:35.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:35.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:35.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:35 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-10T09:54:35.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v370: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v371: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v372: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v373: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v374: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v375: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v376: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v377: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v378: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v379: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v380: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[52384]: pgmap v381: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v370: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v371: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v372: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v373: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v374: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v375: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v376: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v377: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v378: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v379: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v380: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:35.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:35 vm00 ceph-mon[56720]: pgmap v381: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:36.275 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: pgmap v382: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: Cluster is now healthy
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:36.276 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:36 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: pgmap v382: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: Cluster is now healthy
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:36.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: pgmap v382: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: Cluster is now healthy
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:36.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00
ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:36.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:36 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:36.948 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:54:37.029 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.303 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.304 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:37 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:37.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:37 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 3s ago 8m - -
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 33s ago 7m - -
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 3s ago 6m 25.8M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (2m) 33s ago 5m 46.8M - dad864ee21e9 011f2081bf92
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (25s) 3s ago 5m 43.6M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 starting - - - -
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (85s) 3s ago 8m 554M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (8m) 3s ago 8m 73.2M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (7m) 33s ago 7m 50.4M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (7m) 3s ago 7m 53.1M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (2m) 3s ago 6m 9433k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (2m) 33s ago 6m 9487k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (7m) 3s ago 7m 56.7M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (7m) 3s ago 7m 54.2M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (7m) 3s ago 7m 50.2M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (7m) 3s ago 7m 51.3M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (7m) 33s ago 7m 54.2M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (6m) 33s ago 6m 53.2M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (6m) 33s ago 6m 51.2M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (6m) 33s ago 6m 51.6M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 starting - - - -
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (5m) 3s ago 5m 93.4M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:54:37.580 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (5m) 33s ago 5m 94.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:54:37.581 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (5m) 3s ago 5m 94.0M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:54:37.581 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (5m) 33s ago 5m 91.2M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:54:37.850 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:54:37.850 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1,
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 16,
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:54:37.851 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:54:38.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:38.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:38.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:38.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:38.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: pgmap v383: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.9 KiB/s rd, 1 op/s 2026-03-10T09:54:38.024 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:38.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:38.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:38.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:38.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:38.025 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
[... journalctl@ceph.mon.b (vm03) and then journalctl@ceph.mon.a (vm00) repeat the same 09:54:38 cluster-log chatter: "Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb", "Metadata not up to date on all hosts. Skipping non agent specs", pgmap v383 (161 pgs active+clean), client.admin "orch upgrade status" dispatches (client.15297, client.25195), and mgr.y audit cycles ("config dump", "versions", "config generate-minimal-conf", "auth get client.admin") ...]
"json"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/3129936982' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:38.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "", 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading mgr daemons", 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T09:54:38.119 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:54:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:38.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:38 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
[... journalctl@ceph.mon.c (vm00) and then journalctl@ceph.mon.b (vm03) replay the same 09:54:38 chatter: upgrade-progress and metadata warnings, pgmap v383, client.admin dispatches ("orch upgrade status", "versions"), and mgr.y audit cycles ("config dump", "versions", "config generate-minimal-conf", "auth get client.admin") ...]
cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3129936982' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:38.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:38 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:38.399 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T09:54:39.028 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.028 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='client.25210 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1614922176' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T09:54:39.029 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.030 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:39 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='client.25210 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1614922176' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='client.25210 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/1614922176' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:39.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:39 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: pgmap v384: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.031 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:40 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: pgmap v384: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: pgmap v384: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:40.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:40 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.033 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:54:40] "GET /metrics HTTP/1.1" 200 37931 "" "Prometheus/2.51.0"
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:41.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186
192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:41 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 
ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 
192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:41.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 
ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 
192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:41.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:41 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: pgmap v385: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 807 B/s rd, 0 op/s 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 
ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 
192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:42.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:42 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: pgmap v385: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 807 B/s rd, 0 op/s 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:42.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
[the Upgrade / "Metadata not up to date" / config dump / versions / generate-minimal-conf / auth get cycle above repeats verbatim many times in the mon.a journal at 09:54:42-09:54:43]
2026-03-10T09:54:42.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:42.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:42.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:42 vm00 ceph-mon[56720]: pgmap v385: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 807 B/s rd, 0 op/s
[the same cycle repeats in the mon.c journal at 09:54:42-09:54:43]
2026-03-10T09:54:43.435 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:43 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:43.436 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:43 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
[the same cycle repeats in the mon.b journal at 09:54:43]
2026-03-10T09:54:43.437 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:43 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:43.525Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:43.525Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:43.526Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:44.232 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:44.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:44.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:44.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: pgmap v386: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:44.233 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:44 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:44.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: pgmap v386: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:44.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: pgmap v386: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:44.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:44 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:45.325 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:45 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:45.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:45.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
2026-03-10T09:54:45.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:45.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:45.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:45.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... identical ceph.mon.a.vm00 records repeated through 2026-03-10T09:54:45.372; duplicates elided ...]
2026-03-10T09:54:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:45.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:45.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:45.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:45 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... identical ceph.mon.c.vm00 records repeated through 2026-03-10T09:54:45.374; duplicates elided ...]
2026-03-10T09:54:46.149 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:46.149 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:46.149 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: pgmap v387: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1007 B/s rd, 0 op/s
2026-03-10T09:54:46.150 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:46.150 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:46.150 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:46.150 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:46.150 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:46.150 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:46 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... identical ceph.mon.b.vm03 records repeated through 2026-03-10T09:54:46.151; duplicates elided ...]
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: pgmap v387: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1007 B/s rd, 0 op/s
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:46.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:46.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:46.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... identical ceph.mon.a.vm00 records repeated through 2026-03-10T09:54:46.371; duplicates elided ...]
2026-03-10T09:54:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts.
2026-03-10T09:54:46.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: pgmap v387: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1007 B/s rd, 0 op/s
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:46.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:46 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:54:47.105 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:47.105 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
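The recurring "Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb" detail above is cephadm's upgrade loop waiting out its first phase: mgr daemons are upgraded before any other service, and each pass re-polls the mons ("config dump", "versions", "config generate-minimal-conf", "auth get client.admin"), which is exactly the dispatch pattern in these journal lines. A minimal sketch of watching the same state from the bootstrap host, assuming an admin keyring is present (ceph orch upgrade status is the standard cephadm command; the sample output below is illustrative only, since the exact JSON fields vary by release):

    ceph orch upgrade status
    # illustrative output shape only; fields differ between releases:
    # {
    #     "target_image": "quay.io/ceph/ceph:...",
    #     "in_progress": true,
    #     "services_complete": [],
    #     "message": ""
    # }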
2026-03-10T09:54:47.105 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:47.105 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:47.230 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:47.230 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:47.230 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:47.230 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:47.230 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:47.230 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:47 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
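The CephNodeDiskspaceWarning failure recorded above is a PromQL many-to-many error rather than an actual disk-space problem: per the err= detail, node_uname_info exists twice for instance="vm03" (one series carries a cluster label, one does not), so the rule's "* on (instance) group_left (nodename)" join no longer has a unique right-hand side. As an illustrative sketch only (the shipped rule is quoted verbatim in the log entry above), uniqueness could be restored by collapsing the duplicate series before joining:

    predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
      * on (instance) group_left (nodename)
        max by (instance, nodename) (node_uname_info) < 0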
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:47.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:47.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:47.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:47 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: pgmap v388: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.120 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.121 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:48 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: pgmap v388: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: pgmap v388: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:48.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:48 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:49.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 
2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:49.202 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:49 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:49.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:49.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:49.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:49.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:49.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:49.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:49.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:49.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:49 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:50.386 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:50.386 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:50.386 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: pgmap v389: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:54:50.386 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:50.386 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:50.386 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:50.387 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:50.387 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:50 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
Skipping non agent specs 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: pgmap v389: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:50.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:50 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:51.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:54:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:54:50] "GET /metrics HTTP/1.1" 200 37916 "" "Prometheus/2.51.0"
2026-03-10T09:54:51.366 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:51.366 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:51.367 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:51.367 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:51.367 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:51.367 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:51.367 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:51 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:51.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:51.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:51.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:51.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:51.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:51.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:51.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:51.400 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:51 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:52.394 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: pgmap v390: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:52.395 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:52 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: pgmap v390: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.426 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 
ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: pgmap v390: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.427 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:52.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:52 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.347 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.348 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 
ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.349 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:53 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:53.498 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:53 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:54:53.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:53.526Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:53.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:53.526Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:53.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:53.526Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:53.619 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:54:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:54:53.527Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:54:54.444 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
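Every Alertmanager record above is the same failure: the dashboard webhook receiver points at https://host.containers.internal:8443/api/prometheus_receiver, and host.containers.internal does not resolve through the DNS server the container consults (192.168.123.1:53). That name is an alias Podman normally provides so a container can reach its host; when it is missing, every notification attempt dies in DNS before a byte reaches the dashboard. A minimal check that reproduces the failing step (hostname taken verbatim from the log):

```python
import socket

# Reproduce the lookup that Alertmanager's webhook integration keeps failing.
try:
    addr = socket.gethostbyname("host.containers.internal")
    print(f"resolves to {addr}; the webhook target is at least addressable")
except socket.gaierror as exc:
    # Corresponds to "dial tcp: lookup host.containers.internal ... no such host".
    print(f"lookup failed: {exc}")
```

As far as this log shows, the retries are self-contained monitoring noise and do not touch the upgrade path being exercised.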
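The CephOSDFlapping failure is a PromQL join error, not an OSD problem. With `* on (ceph_daemon) group_left (hostname)`, the right-hand side must be unique per `ceph_daemon`, but Prometheus sees `ceph_osd_metadata` for osd.0 twice, identical except for the `instance` label (`ceph_cluster` versus `192.168.123.103:9283`): the usual signature of the same cluster's metrics arriving through two scrape paths at once, which is easy to hit mid-upgrade. A few lines modeling the rule Prometheus enforces (label values copied from the log; the dict join is purely illustrative, and running it raises by design):

```python
# Model the "on (ceph_daemon) group_left (hostname)" join: each match-group
# key on the right-hand side must map to exactly one series.
metadata_series = [
    {"ceph_daemon": "osd.0", "hostname": "vm00", "instance": "ceph_cluster"},
    {"ceph_daemon": "osd.0", "hostname": "vm00", "instance": "192.168.123.103:9283"},
]

by_daemon = {}
for series in metadata_series:
    key = series["ceph_daemon"]
    if key in by_daemon:
        # The condition Prometheus reports as "found duplicate series for the
        # match group ... many-to-many matching not allowed".
        raise ValueError(f"duplicate series for match group ceph_daemon={key!r}")
    by_daemon[key] = series
```

Deduplicating the right-hand side in the rule, for example wrapping it in `max by (ceph_daemon, hostname) (ceph_osd_metadata)`, is a common workaround; the cleaner fix is to stop scraping the exporter twice.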
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:54.444 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:54.445 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:54.445 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:54.445 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:54.445 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:54.445 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:54 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
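Interleaved with the audit traffic, each mon periodically republishes the pgmap summary, and it is worth reading: all 161 PGs are active+clean, with 102 MiB used of 160 GiB, so the cluster stays healthy while the upgrade grinds on. A throwaway parser for pulling those fields out of a capture (the regex matches exactly the record shape above):

```python
import re

# Field-by-field match for the mon's pgmap summary line.
PGMAP = re.compile(
    r"pgmap v(?P<ver>\d+): (?P<pgs>\d+) pgs: (?P<states>[^;]+); "
    r"(?P<data>\S+ \S+) data, (?P<used>\S+ \S+) used, "
    r"(?P<avail>\S+ \S+) / (?P<total>\S+ \S+) avail"
)

line = ("pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, "
        "102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s")
print(PGMAP.search(line).groupdict())
# {'ver': '391', 'pgs': '161', 'states': '161 active+clean', 'data': '457 KiB',
#  'used': '102 MiB', 'avail': '160 GiB', 'total': '160 GiB'}
```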
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:54.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
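All three mons are now emitting the same pair of progress messages in lockstep: the upgrade is parked because one mgr daemon is still at 17.2.0 against the 19.2.3-678-ge911bdeb target, and the orchestrator declines to apply non-agent service specs until every host's metadata has been refreshed. Conceptually the guard amounts to the sketch below (function name and spec shape are assumptions, not cephadm's actual code):

```python
def specs_to_apply(metadata_fresh, specs):
    """Sketch of the guard behind "Metadata not up to date on all hosts.
    Skipping non agent specs": while any host's metadata is stale, only
    agent specs keep being applied, so nothing else is (re)deployed from
    stale facts mid-upgrade."""
    if all(metadata_fresh.values()):
        return list(specs)
    return [s for s in specs if s["service_type"] == "agent"]

# Hosts match the two VMs in this run; the freshness values are made up.
print(specs_to_apply(
    {"vm00": True, "vm03": False},
    [{"service_type": "agent"}, {"service_type": "rgw"}],
))  # -> [{'service_type': 'agent'}]
```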
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:54.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:54 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:55.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:55.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:55.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:55.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:55.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:55.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:55 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
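The `config generate-minimal-conf` and `auth get client.admin` dispatches that dominate the audit cycle are, most likely, the orchestrator assembling the minimal client config and admin keyring it injects into (re)deployed containers. The command can also be run by hand; a sketch of invoking it (needs a reachable cluster and client.admin keyring, shown only for the output's shape):

```python
import subprocess

# Print the minimal conf the mon generates: a [global] section carrying
# essentially the fsid and the mon_host list.
out = subprocess.run(
    ["ceph", "config", "generate-minimal-conf"],
    check=True, capture_output=True, text=True,
).stdout
print(out)
```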
Skipping non agent specs 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:55.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
[... the preceding five mon.a records repeat verbatim through 2026-03-10T09:54:55.621 ...]
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
[... the preceding mon.c pair repeats verbatim several times at 2026-03-10T09:54:55.621 ...]
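The mon.c pair above is cephadm's upgrade loop reporting its state: the mgr daemons are upgraded before anything else, and while host metadata is stale the orchestrator declines to apply non-agent service specs. For post-hoc analysis of a captured run, the version mismatch can be pulled straight out of the message text; the snippet below is a hypothetical helper (not part of teuthology or cephadm) that assumes exactly the wording shown above.

import re

# Hypothetical helper, not part of teuthology or cephadm: pull the current
# and target versions out of the upgrade progress message shown above.
UPGRADE_RE = re.compile(
    r"Upgrade: (?P<count>\d+) (?P<daemon>\w+) daemon\(s\) are "
    r"(?P<current>\S+) != target (?P<target>\S+)"
)

def parse_upgrade_progress(record):
    # Returns (count, daemon_type, current, target), or None if absent.
    m = UPGRADE_RE.search(record)
    if m is None:
        return None
    return (int(m.group("count")), m.group("daemon"),
            m.group("current"), m.group("target"))

line = "Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb"
assert parse_upgrade_progress(line) == (1, "mgr", "17.2.0", "19.2.3-678-ge911bdeb")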
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:55.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:55 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
[... the preceding five mon.c records repeat verbatim, interleaved with the mon.c Upgrade/Metadata pair above, through 2026-03-10T09:54:55.622 ...]
2026-03-10T09:54:56.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:56.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:56.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
[... the mon.b Upgrade/Metadata pair repeats verbatim several times at 2026-03-10T09:54:56.299 ...]
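mon.b's pgmap summary, echoed between the upgrade messages, shows the cluster stayed healthy while the upgrade was gated: all 161 PGs active+clean with negligible client load. If these summaries are wanted as structured data from a log capture, a small regex suffices; the parser below is an illustrative sketch only, assuming exactly the field layout seen in this log.

import re

# Illustrative sketch only: parse the mon's pgmap summary line into fields,
# assuming exactly the layout seen in this log.
PGMAP_RE = re.compile(
    r"pgmap v(?P<version>\d+): (?P<pgs>\d+) pgs: (?P<states>[^;]+); "
    r"(?P<data>[^,]+) data, (?P<used>[^,]+) used, (?P<avail>[^;]+) avail"
)

def parse_pgmap(record):
    m = PGMAP_RE.search(record)
    return m.groupdict() if m else None

sample = ("pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, "
          "102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s")
assert parse_pgmap(sample)["states"] == "161 active+clean"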
2026-03-10T09:54:56.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:56.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:56.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:56.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:56.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:56 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
[... the preceding five mon.b records repeat verbatim, interleaved with the mon.b Upgrade/Metadata pair above, through 2026-03-10T09:54:56.300 ...]
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
[... the mon.a Upgrade/Metadata pair repeats verbatim several times at 2026-03-10T09:54:56.620 ...]
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:56.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
[... the preceding five mon.a records repeat verbatim, interleaved with the mon.a Upgrade/Metadata pair above, through 2026-03-10T09:54:56.621 ...]
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
[... the mon.c Upgrade/Metadata pair repeats verbatim several times at 2026-03-10T09:54:56.621 ...]
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:56.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:56 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
[... the preceding five mon.c records repeat verbatim, interleaved with the mon.c Upgrade/Metadata pair above, through 2026-03-10T09:54:56.622 ...]
2026-03-10T09:54:57.210 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:57.210 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
[... the mon.b Upgrade/Metadata pair repeats verbatim several times through 2026-03-10T09:54:57.211 ...]
Skipping non agent specs 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:54:57.211 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:57 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
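The repeated pair above is the upgrade state machine reporting its gate: cephadm upgrades mgr daemons first, so "1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb" persists until the remaining mgr is redeployed, and "Skipping non agent specs" reflects agent mode, where the module defers applying non-agent service specs until host metadata has refreshed. A hedged polling sketch; `ceph orch upgrade status` is a real command, but the JSON field names used here (in_progress, target_image, message) are assumptions to check against the release in use:

```python
#!/usr/bin/env python3
"""Sketch: wait for a cephadm upgrade to finish, logging its progress."""
import json
import subprocess
import time

def upgrade_status() -> dict:
    out = subprocess.check_output(
        ["ceph", "orch", "upgrade", "status", "--format", "json"])
    return json.loads(out)

if __name__ == "__main__":
    while True:
        st = upgrade_status()
        print(st.get("message", ""), "target:", st.get("target_image"))
        if not st.get("in_progress", False):
            break
        time.sleep(30)  # roughly the cadence of the serve loop logged above
```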
2026-03-10T09:54:57.212 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:54:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:54:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
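The Prometheus warning above is the one genuinely distinct failure in this stretch: the CephNodeDiskspaceWarning rule joins on (instance), but vm03 exports two node_uname_info series, one stamped with a cluster label and one without, so the one-to-one match fails with "matching labels must be unique on one side". A sketch for locating such duplicates via the standard Prometheus HTTP API (/api/v1/query is the real endpoint; the PROM URL below is a placeholder for this deployment):

```python
#!/usr/bin/env python3
"""Sketch: list instances exporting duplicate node_uname_info series."""
import json
import urllib.parse
import urllib.request

PROM = "http://vm03:9095"  # placeholder; point at the managed Prometheus

# More than one series per instance is exactly the condition that breaks
# the rule's `on (instance) group_left (nodename)` join.
QUERY = "count by (instance) (node_uname_info) > 1"

url = PROM + "/api/v1/query?" + urllib.parse.urlencode({"query": QUERY})
with urllib.request.urlopen(url) as resp:
    body = json.load(resp)

for sample in body["data"]["result"]:
    inst = sample["metric"].get("instance", "?")
    print(f"{inst}: {sample['value'][1]} node_uname_info series")
```

One way to make the rule tolerate this is to collapse the right-hand side first, e.g. `* on (instance) group_left (nodename) max by (instance, nodename) (node_uname_info)`, though removing the stale unlabeled series from the scrape targets fixes the root cause.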
2026-03-10T09:54:57.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:57 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:57.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:57 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
[... the Upgrade/Metadata pair and the mgr.y dispatch cycle repeated on ceph.mon.a; duplicates elided ...]
2026-03-10T09:54:57.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:57 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:57.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:57 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
[... the Upgrade/Metadata pair and the mgr.y dispatch cycle repeated on ceph.mon.c; duplicates elided ...]
2026-03-10T09:54:58.427 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:58 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:58.427 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:58 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:58.427 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:58 vm03 ceph-mon[50536]: pgmap v393: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
[... the Upgrade/Metadata pair and the mgr.y dispatch cycle repeated on ceph.mon.b; duplicates elided ...]
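Amid the upgrade chatter, the pgmap line is the health signal that matters for the wait phase: all 161 PGs are active+clean, so data availability is unaffected while the mgr redeploys. A sketch of the same check against `ceph status` JSON; the pgmap.pgs_by_state and num_pgs field names match recent releases but should be treated as assumptions:

```python
#!/usr/bin/env python3
"""Sketch: assert every PG is active+clean, as pgmap v393 reports."""
import json
import subprocess

status = json.loads(
    subprocess.check_output(["ceph", "status", "--format", "json"]))
pgmap = status["pgmap"]
clean = sum(s["count"] for s in pgmap.get("pgs_by_state", [])
            if s["state_name"] == "active+clean")
total = pgmap["num_pgs"]
print(f"{clean}/{total} pgs active+clean")
assert clean == total, "PGs not fully clean; cluster still settling"
```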
2026-03-10T09:54:58.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:58.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:58.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[52384]: pgmap v393: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
[... the Upgrade/Metadata pair and the mgr.y dispatch cycle repeated on ceph.mon.a; duplicates elided; the final entry is truncated in the source ...]
2026-03-10T09:54:58.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:58.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:58.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: pgmap v393: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:54:58.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:58.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:58.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:58.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:58.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:58 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:59.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:54:59 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:59.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:54:59.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:54:59.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:54:59 vm00 ceph-mon[56720]: from='mgr.15186
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: pgmap v394: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:00.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:00.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:00 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:00.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:55:00.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: pgmap v394: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:55:00.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:00.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:00.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:00.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:00.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:00.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: pgmap v394: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:00.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:00.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:00 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:01.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:55:00] "GET /metrics HTTP/1.1" 200 37924 "" "Prometheus/2.51.0"
2026-03-10T09:55:01.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:55:01.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:01.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:01.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:01.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:01.185 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:01.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:55:01.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:01 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:01.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb
2026-03-10T09:55:01.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:01.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T09:55:01.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:01.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:01.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:01.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:01.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:01.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:01 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: pgmap v395: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.186 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 
ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.187 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:02 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: pgmap v395: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: pgmap v395: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 
ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:02.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:02 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[52384]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[52384]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[56720]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:03 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:03 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:03 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:03 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:03 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:03 vm03 ceph-mon[50536]: Upgrade: 1 mgr daemon(s) are 17.2.0 != target 19.2.3-678-ge911bdeb 2026-03-10T09:55:03.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:03 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:03.526Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:03.527Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:03.527Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:03.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:03.528Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:04 vm03 ceph-mon[50536]: pgmap v396: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:55:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:04 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:55:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:04 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:04 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:04.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:04 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:04.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:04 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" 
2026-03-10T09:55:04.549 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:04 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:55:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[52384]: pgmap v396: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[52384]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:04.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[56720]: pgmap v396: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
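The rule evaluation above fails for an upgrade-specific reason visible in the err= payload: Prometheus currently holds two ceph_osd_metadata series for osd.0, one scraped from the old mgr endpoint (instance="192.168.123.103:9283") and one from the new exporter path (instance="ceph_cluster", with an added cluster label). The rule's `on (ceph_daemon) group_left (hostname)` join requires the right-hand side to be unique per ceph_daemon, so evaluation aborts until the stale series ages out; the same duplication explains the CephNodeDiskspaceWarning failure further down. A sketch to confirm and to work around it (vm03:9095 matches prometheus.a in the orch ps listing below; the max-by rewrite is an illustration, not the shipped rule):

    # show the duplicate right-hand-side series the error complains about
    promtool query instant http://vm03:9095 'ceph_osd_metadata{ceph_daemon="osd.0"}'
    # collapsing to one series per daemon makes the group_left join legal again
    promtool query instant http://vm03:9095 'max by (ceph_daemon, hostname) (ceph_osd_metadata)'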
2026-03-10T09:55:04.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:04 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: pgmap v397: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: pgmap v398: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s
2026-03-10T09:55:06.180 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN)
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:06 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:05 vm03 systemd[1]: Stopping Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
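CEPHADM_AGENT_DOWN fires here because agent reports land on the active mgr, and cephadm has just started bouncing mgr daemons (mgr.x is being stopped in the last line above): an agent that misses its reporting deadline marks its host as possibly offline. During a staggered mgr redeploy this is expected and clears once the agent checks in again. The deadline is derived from the cephadm agent options; a sketch for inspecting it (treat the option names as assumptions if your release differs):

    ceph health detail
    ceph config get mgr mgr/cephadm/agent_refresh_rate
    ceph config get mgr mgr/cephadm/agent_down_multiplier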
2026-03-10T09:55:06.181 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 podman[119399]: 2026-03-10 09:55:06.036633871 +0000 UTC m=+0.052963084 container died a32377ac0ce6d05bbbddbd67682a592a437e203e2f902439568d93e5bd7609e7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, com.redhat.component=centos-stream-container, io.openshift.expose-services=, release=754, io.openshift.tags=base centos centos-stream, architecture=x86_64, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, maintainer=Guillaume Abrioux , name=centos-stream, vcs-type=git, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc., version=8, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T09:55:06.181 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 podman[119399]: 2026-03-10 09:55:06.110022174 +0000 UTC m=+0.126351387 container remove a32377ac0ce6d05bbbddbd67682a592a437e203e2f902439568d93e5bd7609e7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., name=centos-stream, release=754, version=8, CEPH_POINT_RELEASE=-17.2.0, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.component=centos-stream-container, ceph=True, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, architecture=x86_64) 2026-03-10T09:55:06.181 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 bash[119399]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x 2026-03-10T09:55:06.181 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: pgmap v397: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: pgmap v398: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: Health check failed: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:06.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: pgmap v397: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: pgmap v398: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: Health check failed: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:55:06.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:06 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:06.461 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T09:55:06.461 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 systemd[1]: Stopped Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:06.461 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service: Consumed 5.656s CPU time. 2026-03-10T09:55:06.461 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 systemd[1]: Starting Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:55:06.714 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 podman[119507]: 2026-03-10 09:55:06.460664273 +0000 UTC m=+0.018256184 container create 37927dd471012d06d8d538cceaa66b220c1d09d659c8017b1e664462a3f62a1a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 podman[119507]: 2026-03-10 09:55:06.453404136 +0000 UTC m=+0.010996047 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 podman[119507]: 2026-03-10 09:55:06.555815468 +0000 UTC m=+0.113407379 container init 37927dd471012d06d8d538cceaa66b220c1d09d659c8017b1e664462a3f62a1a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223) 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 podman[119507]: 2026-03-10 09:55:06.558498064 +0000 UTC m=+0.116089975 container start 37927dd471012d06d8d538cceaa66b220c1d09d659c8017b1e664462a3f62a1a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 bash[119507]: 37927dd471012d06d8d538cceaa66b220c1d09d659c8017b1e664462a3f62a1a 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 systemd[1]: Started Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:06.667+0000 7fc8b5e49140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:55:06.715 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:06.712+0000 7fc8b5e49140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:55:07.053 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:06.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", 
instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:55:07.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[52384]: Upgrade: Updating mgr.x 2026-03-10T09:55:07.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[52384]: Deploying daemon mgr.x on vm03 2026-03-10T09:55:07.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:07.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[56720]: Upgrade: Updating mgr.x 2026-03-10T09:55:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[56720]: Deploying daemon mgr.x on vm03 2026-03-10T09:55:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:07.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:07 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:07.401 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:07.183+0000 7fc8b5e49140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T09:55:07.401 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:07 vm03 ceph-mon[50536]: Upgrade: Updating mgr.x 2026-03-10T09:55:07.401 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:07 vm03 ceph-mon[50536]: Deploying daemon mgr.x on vm03 2026-03-10T09:55:07.401 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:07.401 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:07 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:07.651 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:07.555+0000 7fc8b5e49140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T09:55:07.651 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T09:55:07.651 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T09:55:07.651 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: from numpy import show_config as show_numpy_config
2026-03-10T09:55:07.979 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:07.656+0000 7fc8b5e49140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:55:07.980 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:07.695+0000 7fc8b5e49140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:55:07.980 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:07 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:07.777+0000 7fc8b5e49140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:55:08.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:08 vm03 ceph-mon[50536]: pgmap v399: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s
2026-03-10T09:55:08.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:08 vm00 ceph-mon[52384]: pgmap v399: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s
2026-03-10T09:55:08.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:08 vm00 ceph-mon[56720]: pgmap v399: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s
2026-03-10T09:55:08.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.382+0000 7fc8b5e49140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:55:08.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.517+0000 7fc8b5e49140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:55:08.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.569+0000 7fc8b5e49140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:55:08.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.610+0000 7fc8b5e49140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:55:08.657 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.655+0000 7fc8b5e49140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:55:08.662 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:55:08.934 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.699+0000 7fc8b5e49140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:55:08.934 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.929+0000 7fc8b5e49140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:55:09.091 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 16s ago 8m - -
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 running 0s ago 8m - -
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 16s ago 6m 25.9M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (2m) 0s ago 6m 47.0M - dad864ee21e9 011f2081bf92
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (56s) 16s ago 6m 45.7M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (2s) 0s ago 8m 53.0M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (116s) 16s ago 9m 556M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (9m) 16s ago 9m 75.8M 2048M 17.2.0 e1d6a67b021e 9e4f29548d3b
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (8m) 0s ago 8m 54.7M 2048M 17.2.0 e1d6a67b021e 6c31bf149b5f
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (8m) 16s ago 8m 55.3M 2048M 17.2.0 e1d6a67b021e 9b572ece92f4
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (2m) 16s ago 6m 9537k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (2m) 0s ago 6m 9903k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (8m) 16s ago 8m 56.8M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (7m) 16s ago 7m 54.3M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (7m) 16s ago 7m 50.3M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (7m) 16s ago 7m 51.4M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (7m) 0s ago 7m 54.4M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (7m) 0s ago 7m 53.5M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (7m) 0s ago 7m 51.5M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (7m) 0s ago 7m 51.9M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (59s) 0s ago 6m 43.9M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (6m) 16s ago 6m 93.4M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (6m) 0s ago 6m 94.8M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (6m) 16s ago 6m 94.0M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:55:09.092 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (6m) 0s ago 6m 91.7M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
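This `ceph orch ps` snapshot catches the staggered upgrade mid-flight: both mgrs already run 19.2.3-678-ge911bdeb (mgr.x for only 2s, fresh from its redeploy), while the mons, OSDs, rgw and iscsi daemons still report 17.2.0. A compact way to list only the daemons still awaiting upgrade (jq and the JSON field names are assumptions about this release's orch ps output; the version string is taken from this run):

    ceph orch ps --format json \
      | jq -r '.[] | select(.version == "17.2.0") | .daemon_type + "." + .daemon_id'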
2026-03-10T09:55:09.283 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.283 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.283 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.283 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:09 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.283 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:08.996+0000 7fc8b5e49140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:55:09.283 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.281+0000 7fc8b5e49140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-10T09:55:09.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.349 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.349 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.350 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.350 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.350 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:09 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y'
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1,
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 16,
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:55:09.350 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "mgr"
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "2/23 daemons upgraded",
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading mgr daemons",
2026-03-10T09:55:09.566 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:55:09.567 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:55:09.827 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:55:09.827 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:55:09.827 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
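The two JSON blobs above are the test polling `ceph versions` and `ceph orch upgrade status`: the version map still shows a mixed cluster (one mgr on squid, sixteen daemons on quincy), and the orchestrator reports 2/23 daemons upgraded with only "mgr" complete. A minimal wait loop of the kind an operator might use (assumes jq is installed; `ceph orch upgrade status` already emits JSON, as seen above):

    # poll until the orchestrator no longer reports an upgrade in flight
    while [ "$(ceph orch upgrade status | jq -r '.in_progress')" = "true" ]; do
        ceph orch upgrade status | jq -r '.progress'
        sleep 30
    done
    ceph versions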
2026-03-10T09:55:09.941 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.607+0000 7fc8b5e49140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T09:55:09.942 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.658+0000 7fc8b5e49140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T09:55:09.942 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.704+0000 7fc8b5e49140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T09:55:09.942 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.797+0000 7fc8b5e49140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T09:55:09.942 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.850+0000 7fc8b5e49140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='client.25228 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='client.25231 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='client.25237 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: pgmap v400: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='client.? 192.168.123.100:0/3088101014' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='client.? 
192.168.123.100:0/1386589063' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='client.25228 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='client.25231 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='client.25237 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: pgmap v400: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='client.? 
192.168.123.100:0/3088101014' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='client.? 192.168.123.100:0/1386589063' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.123 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:10.124 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:10.124 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-10T09:55:10.124 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-10T09:55:10.124 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:10.124 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-mon[52384]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:09.940+0000 7fc8b5e49140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:10.062+0000 7fc8b5e49140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='client.25228 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='client.25231 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.207 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='client.25237 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: pgmap v400: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/3088101014' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='client.? 192.168.123.100:0/1386589063' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:10.207 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:10 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-10T09:55:10.547 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:10.206+0000 7fc8b5e49140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:10.242+0000 7fc8b5e49140 -1 mgr[py] Module zabbix 
has missing NOTIFY_TYPES member 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: [10/Mar/2026:09:55:10] ENGINE Bus STARTING 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: CherryPy Checker: 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: The Application mounted at '' has an empty config. 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: [10/Mar/2026:09:55:10] ENGINE Serving on http://:::9283 2026-03-10T09:55:10.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: [10/Mar/2026:09:55:10] ENGINE Bus STARTED 2026-03-10T09:55:10.825 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 systemd[1]: Stopping Ceph mon.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:55:10.825 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:55:10] "GET /metrics HTTP/1.1" 200 37924 "" "Prometheus/2.51.0" 2026-03-10T09:55:11.096 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a[52380]: 2026-03-10T09:55:10.824+0000 7efc8ebfd700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:55:11.096 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a[52380]: 2026-03-10T09:55:10.824+0000 7efc8ebfd700 -1 mon.a@0(leader) e3 *** Got Signal Terminated *** 2026-03-10T09:55:11.097 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:10] ENGINE Bus STOPPING 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 podman[118432]: 2026-03-10 09:55:11.097340347 +0000 UTC m=+0.300952614 container died 9e4f29548d3ba42f8752b69f0df1623c113155be3dbe274ac6364f6fa860fdc9 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.tags=base centos centos-stream, build-date=2022-05-03T08:36:31.336870, release=754, RELEASE=HEAD, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, distribution-scope=public, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, name=centos-stream, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 podman[118432]: 2026-03-10 09:55:11.121371809 +0000 UTC m=+0.324984085 container remove 9e4f29548d3ba42f8752b69f0df1623c113155be3dbe274ac6364f6fa860fdc9 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a, GIT_CLEAN=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.buildah.version=1.19.8, vendor=Red Hat, Inc., maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, release=754, build-date=2022-05-03T08:36:31.336870, ceph=True, GIT_BRANCH=HEAD, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, architecture=x86_64, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-type=git, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, name=centos-stream) 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 bash[118432]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service: Deactivated successfully. 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 systemd[1]: Stopped Ceph mon.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service: Consumed 14.460s CPU time. 
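The mon.a shutdown above was gated the same way the mgr redeploys were: mgr.y first dispatched quorum_status and `mon ok-to-stop` for "a" (see the dispatches at 09:55:10 above), and only then did cephadm deliver SIGTERM to the 17.2.0 mon.a container, which exited cleanly ("Deactivated successfully"). With mon.b and mon.c still up, the one-at-a-time restart never loses quorum. The same safety check can be run by hand:

    # exits non-zero if stopping this mon would break quorum
    ceph mon ok-to-stop a
    # current quorum membership
    ceph quorum_status --format json | jq -r '.quorum_names[]'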
2026-03-10T09:55:11.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:11] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:11] ENGINE Bus STOPPED 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:11] ENGINE Bus STARTING 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:11] ENGINE Serving on http://:::9283 2026-03-10T09:55:11.370 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:11] ENGINE Bus STARTED 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 systemd[1]: Starting Ceph mon.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 podman[118560]: 2026-03-10 09:55:11.51369564 +0000 UTC m=+0.034380975 container create 5f537ed367b077e4c4f87f4f99601e3a5c5ec98dda14a06573ebb69ae5696100 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS) 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 podman[118560]: 2026-03-10 09:55:11.54890203 +0000 UTC m=+0.069587374 container init 5f537ed367b077e4c4f87f4f99601e3a5c5ec98dda14a06573ebb69ae5696100 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid) 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 podman[118560]: 2026-03-10 09:55:11.553641602 +0000 UTC m=+0.074326936 container start 5f537ed367b077e4c4f87f4f99601e3a5c5ec98dda14a06573ebb69ae5696100 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-a, CEPH_REF=squid, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 bash[118560]: 5f537ed367b077e4c4f87f4f99601e3a5c5ec98dda14a06573ebb69ae5696100 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 podman[118560]: 2026-03-10 09:55:11.503710129 +0000 UTC m=+0.024395484 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 systemd[1]: Started Ceph mon.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: pidfile_write: ignore empty --pid-file 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: load: jerasure load: lrc 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: RocksDB version: 7.9.2 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Git sha 0 2026-03-10T09:55:11.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: DB SUMMARY 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: DB Session ID: 2FYYFPZ8S7MEOUIQ5OP2 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: CURRENT file: CURRENT 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: MANIFEST file: MANIFEST-000015 size: 1770 Bytes 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000042.sst 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000040.log size: 1885687 ; 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.error_if_exists: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.create_if_missing: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.paranoid_checks: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.env: 0x562a01b2ddc0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.info_log: 0x562a042137e0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.statistics: (nil) 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.use_fsync: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_log_file_size: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.allow_fallocate: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.use_direct_reads: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.db_log_dir: 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.wal_dir: 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T09:55:11.660 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.write_buffer_manager: 0x562a04217900 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.unordered_write: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.row_cache: None 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.wal_filter: None 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T09:55:11.660 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.two_write_queues: 0 2026-03-10T09:55:11.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.wal_compression: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.atomic_flush: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.log_readahead_size: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_background_jobs: 2 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_background_compactions: -1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_subcompactions: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T09:55:11.661 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_open_files: -1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_background_flushes: -1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Compression algorithms supported: 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kZSTD supported: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kXpressCompression supported: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kBZip2Compression supported: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kLZ4Compression supported: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kZlibCompression supported: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: kSnappyCompression supported: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.merge_operator: 
2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_filter: None 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x562a04213440) 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: index_type: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: data_block_index_type: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: index_shortening: 1 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: checksum: 4 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: no_block_cache: 0 2026-03-10T09:55:11.661 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache: 0x562a042369b0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache_options: 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: capacity : 536870912 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: num_shard_bits : 4 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: strict_capacity_limit : 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache_compressed: (nil) 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: persistent_cache: (nil) 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_size: 4096 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_size_deviation: 10 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_restart_interval: 16 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: index_block_restart_interval: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: metadata_block_size: 4096 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: partition_filters: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: use_delta_encoding: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: filter_policy: bloomfilter 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: whole_key_filtering: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: 
verify_compression: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: format_version: 5 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: enable_index_compression: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: block_align: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: max_auto_readahead_size: 262144 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: prepopulate_block_cache: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: initial_auto_readahead_size: 8192 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression: NoCompression 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.num_levels: 7 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: 
Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T09:55:11.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T09:55:11.663 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T09:55:11.663 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.inplace_update_support: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.bloom_locality: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.max_successive_merges: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.ttl: 2592000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.enable_blob_files: false 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.min_blob_size: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.blob_file_starting_level: 0 
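The wall of `rocksdb: Options.*` entries the restarted mon.a emits here (and in the tail just below) is the monitor store's RocksDB configuration dumped at open time; its main practical use is diffing settings between the pre- and post-upgrade binaries. A small sketch for folding a journalctl capture into a comparable dict, keyed off the exact `rocksdb: Options.<name>: <value>` pattern visible in these entries (assumes one journal entry per line, as journalctl emits):

import re
from typing import Dict

# Matches e.g. "rocksdb: Options.max_open_files: -1"; also tolerates the
# odd space before the colon ("Options.delayed_write_rate : 16777216").
OPT_RE = re.compile(r"rocksdb: Options\.([A-Za-z0-9_.\[\]]+)\s*:\s*(.*)")

def rocksdb_options(journal_text: str) -> Dict[str, str]:
    opts: Dict[str, str] = {}
    for line in journal_text.splitlines():
        m = OPT_RE.search(line)
        if m:
            opts[m.group(1)] = m.group(2).strip()
    return opts

# Usage: capture the quincy mon's dump and the squid mon's dump, then
# compare: {k: (old.get(k), v) for k, v in new.items() if old.get(k) != v}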
2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 44, last_sequence is 23746, log_number is 40,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 40 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: c9e68c27-84cc-4bc7-bd4b-1270866b1364 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136511588254, "job": 1, "event": "recovery_started", "wal_files": [40]} 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #40 mode 2 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136511599381, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 45, "file_size": 1750822, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 23747, "largest_seqno": 26435, "table_properties": {"data_size": 1741544, "index_size": 3888, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 4357, "raw_key_size": 40086, "raw_average_key_size": 23, "raw_value_size": 1714260, "raw_average_value_size": 988, "num_data_blocks": 157, "num_entries": 1734, "num_filter_entries": 1734, "num_deletions": 290, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773136511, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "c9e68c27-84cc-4bc7-bd4b-1270866b1364", "db_session_id": "2FYYFPZ8S7MEOUIQ5OP2", "orig_file_number": 45, "seqno_to_time_mapping": "N/A"}} 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136511599623, 
"job": 1, "event": "recovery_finished"} 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/version_set.cc:5047] Creating manifest 47 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T09:55:11.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000040.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x562a04238e00 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: rocksdb: DB pointer 0x562a04248000 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: starting mon.a rank 0 at public addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] at bind addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???) e3 preinit fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).mds e1 new map 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).mds e1 print_map 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: e1 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: legacy client fscid: -1 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout: No filesystems configured 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).osd e95 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T09:55:11.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:11 vm00 ceph-mon[118593]: mon.a@-1(???).paxosservice(auth 1..23) 
refresh upgraded, format 0 -> 3 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: pgmap v403: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: mon.a calling monitor election 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: monmap epoch 3 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: min_mon_release 17 (quincy) 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: election_strategy: 1 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: fsmap 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: mgrmap e43: y(active, since 74s), standbys: x 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: Health detail: HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: [WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[118593]: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 
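mon.a comes back, calls an election, and retakes the lead with mons a,c,b in quorum, but the cluster stays HEALTH_WARN on CEPHADM_AGENT_DOWN: the agent on vm03 has not reported within its 60-second window, which is expected to be transient while that host's daemons are being restarted. Scripts driving an upgrade usually wait for the specific check to clear rather than for HEALTH_OK overall. A minimal sketch, assuming `ceph health detail -f json` reports raised checks keyed by name:

import json
import subprocess
import time

def active_checks() -> set:
    # Names of currently raised health checks, e.g. {"CEPHADM_AGENT_DOWN"}.
    out = subprocess.run(["ceph", "health", "detail", "-f", "json"],
                         check=True, capture_output=True, text=True).stdout
    return set(json.loads(out).get("checks", {}))

def wait_for_check_to_clear(name: str, timeout: float = 600.0) -> None:
    deadline = time.monotonic() + timeout
    while name in active_checks():
        if time.monotonic() > deadline:
            raise TimeoutError(f"{name} still raised after {timeout}s")
        time.sleep(10)

# wait_for_check_to_clear("CEPHADM_AGENT_DOWN")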
2026-03-10T09:55:13.667 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ignoring --setuser ceph since I am not root 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ignoring --setgroup ceph since I am not root 2026-03-10T09:55:13.667 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:13.527Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:13.667 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:13.527Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:13.667 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:13.528Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:13.667 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:13.528Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:55:13.667 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: pgmap v403: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: mon.a calling monitor election 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:13.668 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: monmap epoch 3 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: min_mon_release 17 (quincy) 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: election_strategy: 1 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: fsmap 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: mgrmap e43: y(active, since 74s), standbys: x 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: Health detail: HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: [WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:55:13.668 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:13 vm00 ceph-mon[56720]: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 
2026-03-10T09:55:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: ignoring --setuser ceph since I am not root 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: ignoring --setgroup ceph since I am not root 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:13.601+0000 7f14a7208140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:13.650+0000 7f14a7208140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: from='mgr.15186 192.168.123.100:0/2601180641' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: pgmap v403: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: mon.a calling monitor election 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: monmap epoch 3 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: min_mon_release 17 (quincy) 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: election_strategy: 1 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: fsmap 2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: osdmap e95: 8 total, 8 up, 8 in 
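The `mgr[py] Module <name> has missing NOTIFY_TYPES member` entries from both mgr.x and mgr.y (alerts, balancer, crash, devicehealth, and more as the modules load) are warnings from the mgr's module loader about modules that do not declare which notification types they consume; they are noisy during this window but not failures. To see which modules trip the warning across a whole run, a quick tally over a journal capture (pattern taken verbatim from these entries):

import re
from collections import Counter

WARN_RE = re.compile(r"mgr\[py\] Module (\w+) has missing NOTIFY_TYPES member")

def notify_types_warnings(journal_text: str) -> Counter:
    # e.g. Counter({"alerts": 2, "balancer": 2, "crash": 2, ...})
    return Counter(WARN_RE.findall(journal_text))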
2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: mgrmap e43: y(active, since 74s), standbys: x
2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: Health detail: HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:55:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: [WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:55:13.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:13 vm03 ceph-mon[50536]: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
2026-03-10T09:55:13.956 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:13.672+0000 7f824510c140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-10T09:55:13.956 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:13.718+0000 7f824510c140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-10T09:55:14.293 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:14.186+0000 7f824510c140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:55:14.444 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:55:14.444 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:14.095+0000 7f14a7208140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-10T09:55:14.444 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:14.441+0000 7f14a7208140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:55:14.551 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[118593]: from='mgr.15186 ' entity=''
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[118593]: Standby manager daemon x restarted
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[118593]: Standby manager daemon x started
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[118593]: mgrmap e44: y(active, since 74s), standbys: x
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[118593]: from='mgr.15186 ' entity='mgr.y'
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[56720]: from='mgr.15186 ' entity=''
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[56720]: Standby manager daemon x restarted
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[56720]: Standby manager daemon x started
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[56720]: mgrmap e44: y(active, since 74s), standbys: x
2026-03-10T09:55:14.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:14 vm00 ceph-mon[56720]: from='mgr.15186 ' entity='mgr.y'
2026-03-10T09:55:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:14 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:14 vm03 ceph-mon[50536]: from='mgr.15186 ' entity=''
2026-03-10T09:55:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:14 vm03 ceph-mon[50536]: Standby manager daemon x restarted
2026-03-10T09:55:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:14 vm03 ceph-mon[50536]: Standby manager daemon x started
2026-03-10T09:55:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:14 vm03 ceph-mon[50536]: mgrmap e44: y(active, since 74s), standbys: x
2026-03-10T09:55:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:14 vm03 ceph-mon[50536]: from='mgr.15186 ' entity='mgr.y'
2026-03-10T09:55:14.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:55:14.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
2026-03-10T09:55:14.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: from numpy import show_config as show_numpy_config
2026-03-10T09:55:14.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:14.543+0000 7f14a7208140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:55:14.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:14.581+0000 7f14a7208140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:55:14.799 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:14.667+0000 7f14a7208140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:14.551+0000 7f824510c140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
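Note: the Prometheus "Evaluating rule failed ... many-to-many matching not allowed" warning above is not an OSD problem: during the upgrade ceph_osd_metadata is exposed by two scrape targets at once (instance="ceph_cluster" and the mgr exporter at 192.168.123.103:9283), so the one-to-one join in the CephOSDFlapping rule sees duplicate series for each ceph_daemon. The CephNodeDiskspaceWarning failure further down fails the same way, with node_uname_info duplicated with and without the cluster label. One way to make such a join unambiguous is to collapse the metadata side first; a hedged sketch against the Prometheus HTTP API (host vm03 and port 9095 are assumptions based on cephadm defaults):

    import requests

    # Hedged sketch: aggregate ceph_osd_metadata so each ceph_daemon maps to a
    # single series before the group_left join, sidestepping the duplicate series.
    query = (
        "(rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) "
        "(max by (ceph_daemon, hostname) (ceph_osd_metadata))) * 60 > 1"
    )
    resp = requests.get("http://vm03:9095/api/v1/query", params={"query": query})
    resp.raise_for_status()
    print(resp.json()["data"]["result"])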
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: from numpy import show_config as show_numpy_config
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:14.650+0000 7f824510c140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:14.692+0000 7f824510c140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-10T09:55:14.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:14.768+0000 7f824510c140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-10T09:55:15.503 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.226+0000 7f14a7208140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:55:15.503 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.343+0000 7f14a7208140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:55:15.503 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.386+0000 7f14a7208140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:55:15.503 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.421+0000 7f14a7208140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:55:15.503 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.463+0000 7f14a7208140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:55:15.503 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.501+0000 7f14a7208140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:55:15.583 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.314+0000 7f824510c140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-10T09:55:15.583 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.426+0000 7f824510c140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:55:15.583 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.466+0000 7f824510c140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-10T09:55:15.583 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.503+0000 7f824510c140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-10T09:55:15.583 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.545+0000 7f824510c140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-10T09:55:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:15 vm03 ceph-mon[50536]: mgrmap e45: y(active, since 75s), standbys: x
2026-03-10T09:55:15.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.676+0000 7f14a7208140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:55:15.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.727+0000 7f14a7208140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:55:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:15 vm00 ceph-mon[118593]: mgrmap e45: y(active, since 75s), standbys: x
2026-03-10T09:55:15.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:15 vm00 ceph-mon[56720]: mgrmap e45: y(active, since 75s), standbys: x
2026-03-10T09:55:15.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.583+0000 7f824510c140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-10T09:55:15.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.763+0000 7f824510c140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-10T09:55:15.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:15 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:15.819+0000 7f824510c140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-10T09:55:16.253 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:15.954+0000 7f14a7208140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-10T09:55:16.342 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.050+0000 7f824510c140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-10T09:55:16.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.252+0000 7f14a7208140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:55:16.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.292+0000 7f14a7208140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:55:16.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.336+0000 7f14a7208140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:55:16.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.426+0000 7f14a7208140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:55:16.548 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.470+0000 7f14a7208140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:55:16.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.342+0000 7f824510c140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-10T09:55:16.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.382+0000 7f824510c140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-10T09:55:16.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.427+0000 7f824510c140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-10T09:55:16.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.521+0000 7f824510c140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-10T09:55:16.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.563+0000 7f824510c140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-10T09:55:16.830 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.556+0000 7f14a7208140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:55:16.830 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.679+0000 7f14a7208140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:55:16.915 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.655+0000 7f824510c140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-10T09:55:16.915 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.773+0000 7f824510c140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-10T09:55:17.200 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[118593]: Standby manager daemon x restarted
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[118593]: Standby manager daemon x started
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[118593]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[118593]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[118593]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[118593]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[56720]: Standby manager daemon x restarted
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[56720]: Standby manager daemon x started
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:16 vm00 ceph-mon[56720]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.914+0000 7f824510c140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:16.958+0000 7f824510c140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:17] ENGINE Bus STARTING
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: CherryPy Checker:
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: The Application mounted at '' has an empty config.
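Note: the long run of "Module <name> has missing NOTIFY_TYPES member" lines is the quincy mgr complaining that its bundled modules do not declare which cluster notifications they consume; it is cosmetic here and repeats once per module every time a mgr restarts. For reference, a minimal sketch of what the declaration looks like inside a mgr module (the NotifyType enum exists in the mgr runtime; the module itself is purely illustrative):

    # Hedged sketch of a mgr module declaring its notification interests.
    # mgr_module is only importable inside the ceph-mgr Python runtime.
    from mgr_module import MgrModule, NotifyType

    class ExampleModule(MgrModule):
        # With NOTIFY_TYPES declared, the mgr delivers only these notifications
        # and the "missing NOTIFY_TYPES member" warning is not logged.
        NOTIFY_TYPES = [NotifyType.mon_map, NotifyType.osd_map]

        def notify(self, notify_type, notify_id):
            # Called by the mgr for each subscribed notification.
            self.log.debug("got %s notification", notify_type)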
2026-03-10T09:55:17.201 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:17.298 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:16 vm03 ceph-mon[50536]: Standby manager daemon x restarted 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:16 vm03 ceph-mon[50536]: Standby manager daemon x started 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:16 vm03 ceph-mon[50536]: from='mgr.? 192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:16 vm03 ceph-mon[50536]: from='mgr.? 
192.168.123.103:0/2249018113' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.828+0000 7f14a7208140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:16.869+0000 7f14a7208140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: [10/Mar/2026:09:55:16] ENGINE Bus STARTING 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: CherryPy Checker: 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: The Application mounted at '' has an empty config. 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: [10/Mar/2026:09:55:16] ENGINE Serving on http://:::9283 2026-03-10T09:55:17.299 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 09:55:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-x[119517]: [10/Mar/2026:09:55:16] ENGINE Bus STARTED 2026-03-10T09:55:17.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:17] ENGINE Serving on http://:::9283 2026-03-10T09:55:17.620 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:17 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:17] ENGINE Bus STARTED 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: mgrmap e46: y(active, since 77s), standbys: x 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: Active manager daemon y restarted 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: Activating manager daemon y 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: mgrmap e47: y(active, starting, since 0.00818203s), standbys: x 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: Manager daemon y is now available 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:18.172 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:55:18.172 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:18 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:55:18.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: mgrmap e46: y(active, since 77s), standbys: x 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: Active manager daemon y restarted 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: Activating manager daemon y 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: mgrmap e47: y(active, starting, since 0.00818203s), standbys: x 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: Manager daemon y is now available 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: mgrmap e46: y(active, since 77s), standbys: x 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: Active manager daemon y restarted 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: Activating manager daemon y 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: mgrmap e47: y(active, starting, since 0.00818203s), standbys: x 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:55:18.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: 
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: Manager daemon y is now available 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:18 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T09:55:18.621 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:18.368+0000 7f8211476640 -1 mgr.server handle_report got status from non-daemon mon.a 2026-03-10T09:55:19.369 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: [10/Mar/2026:09:55:17] ENGINE Bus STARTING 2026-03-10T09:55:19.369 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 
ceph-mon[50536]: [10/Mar/2026:09:55:17] ENGINE Serving on https://192.168.123.100:7151 2026-03-10T09:55:19.369 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: [10/Mar/2026:09:55:17] ENGINE Client ('192.168.123.100', 55896) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T09:55:19.370 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: [10/Mar/2026:09:55:18] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T09:55:19.370 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: [10/Mar/2026:09:55:18] ENGINE Bus STARTED 2026-03-10T09:55:19.370 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: mgrmap e48: y(active, since 1.40736s), standbys: x 2026-03-10T09:55:19.370 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:19.370 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:19.370 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:19 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: [10/Mar/2026:09:55:17] ENGINE Bus STARTING 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: [10/Mar/2026:09:55:17] ENGINE Serving on https://192.168.123.100:7151 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: [10/Mar/2026:09:55:17] ENGINE Client ('192.168.123.100', 55896) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: [10/Mar/2026:09:55:18] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: [10/Mar/2026:09:55:18] ENGINE Bus STARTED 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: mgrmap e48: y(active, since 1.40736s), standbys: x 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:19.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: [10/Mar/2026:09:55:17] ENGINE Bus STARTING 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: [10/Mar/2026:09:55:17] ENGINE Serving on https://192.168.123.100:7151 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: [10/Mar/2026:09:55:17] ENGINE Client ('192.168.123.100', 55896) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: 
[10/Mar/2026:09:55:18] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: [10/Mar/2026:09:55:18] ENGINE Bus STARTED 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: mgrmap e48: y(active, since 1.40736s), standbys: x 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:19 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[118593]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[118593]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. Hosts may be offline) 2026-03-10T09:55:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[118593]: Cluster is now healthy 2026-03-10T09:55:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[118593]: mgrmap e49: y(active, since 2s), standbys: x 2026-03-10T09:55:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:20.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[56720]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:20.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. Hosts may be offline) 2026-03-10T09:55:20.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[56720]: Cluster is now healthy 2026-03-10T09:55:20.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[56720]: mgrmap e49: y(active, since 2s), standbys: x 2026-03-10T09:55:20.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:20.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:20 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:20 vm03 ceph-mon[50536]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:20 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:55:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:20 vm03 ceph-mon[50536]: Cluster is now healthy 2026-03-10T09:55:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:20 vm03 ceph-mon[50536]: mgrmap e49: y(active, since 2s), standbys: x 2026-03-10T09:55:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:20 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:20 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:21.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:55:20] "GET /metrics HTTP/1.1" 200 35138 "" "Prometheus/2.51.0" 2026-03-10T09:55:22.118 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: mgrmap e50: y(active, since 4s), standbys: x 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:55:22.369 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:22.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: mgrmap e50: y(active, since 4s), standbys: x 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating 
vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:22.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:22 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 
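(Editor's note, not part of the run: the repeated "config rm" and "auth get-or-create client.agent.*" dispatches in each mon's journal are the mgr clearing the host-scoped osd_memory_target override and refreshing credentials for the cephadm agents; just below, the agent on vm03 is reconfigured and a transient CEPHADM_AGENT_DOWN health check fires until it reports again. A hedged sketch of the equivalent manual commands, with entity names copied from this run:)

    # Drop the host-scoped OSD memory target override for vm03.
    ceph config rm osd/host:vm03 osd_memory_target
    # Fetch-or-create the keyring the cephadm agent on vm00 runs with
    # (the log shows it created with an empty caps list).
    ceph auth get-or-create client.agent.vm00
    # Inspect the transient agent health warning while it lasts.
    ceph health detail
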
2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: mgrmap e50: y(active, since 4s), standbys: x 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.conf 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:22 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.279 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:55:23.279 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: Reconfiguring daemon agent.vm03 on vm03 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: Health check failed: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.280 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: Reconfiguring daemon agent.vm03 on vm03 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: Health check failed: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.284 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:23 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: Updating vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/config/ceph.client.admin.keyring 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm03", "caps": []}]: dispatch 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: Reconfiguring daemon agent.vm03 on vm03 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: Health check failed: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:23 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm00", "caps": []}]: dispatch 2026-03-10T09:55:23.575 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:23.529Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:23.575 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:23.529Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:23.575 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:23.530Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:23.575 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:23.530Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:24.217 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD 
{{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:55:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:24 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:55:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:24 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:24 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:24 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:24 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:24 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:55:24.643 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:24 vm00 
ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[56720]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:24.644 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:24 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 14 op/s 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. Hosts may be offline) 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: Cluster is now healthy 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: 
from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 14 op/s 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. Hosts may be offline) 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: Cluster is now healthy 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:25 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 14 op/s 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: Health check cleared: CEPHADM_AGENT_DOWN (was: 1 Cephadm Agent(s) are not reporting. 
Hosts may be offline) 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: Cluster is now healthy 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:25.659 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:25.660 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:25.660 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:25.660 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:25 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[118593]: Upgrade: It appears safe to stop mon.c 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[56720]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[56720]: Upgrade: It appears safe to stop mon.c 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.417 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:26 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:26 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:26 vm03 ceph-mon[50536]: Upgrade: It appears safe to stop mon.c 2026-03-10T09:55:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:26 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:26 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:26 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:26 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.298 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:55:27.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:55:27.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: Upgrade: Updating mon.c 2026-03-10T09:55:27.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 
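(Editor's note, not part of the run: the upgrade loop has reached mon.c here — the mgr issues "mon ok-to-stop c", logs "Upgrade: It appears safe to stop mon.c", then "Upgrade: Updating mon.c" and redeploys the daemon. A hedged sketch of watching this phase from the CLI:)

    # Minimal sketch of observing the mon phase of `ceph orch upgrade`.
    ceph orch upgrade status            # target image and current progress
    ceph mon ok-to-stop c               # the same quorum-safety check the mgr ran
    ceph orch ps --daemon-type mon      # per-mon image/version as each restarts
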
vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:27.660 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: Deploying daemon mon.c on vm00 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[118593]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: Upgrade: Updating mon.c 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: Deploying daemon mon.c on vm00 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.661 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-mon[56720]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: Upgrade: Updating mon.c 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 
ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: Deploying daemon mon.c on vm00 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:27 vm03 ceph-mon[50536]: from='mgr.25264 ' entity='mgr.y' 2026-03-10T09:55:27.945 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 systemd[1]: Stopping Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:55:28.227 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:27] ENGINE Bus STOPPING 2026-03-10T09:55:28.227 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:28] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T09:55:28.227 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:28] ENGINE Bus STOPPED 2026-03-10T09:55:28.227 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:28] ENGINE Bus STARTING 2026-03-10T09:55:28.228 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c[56716]: 2026-03-10T09:55:27.943+0000 7f8f00f1d700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:55:28.228 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c[56716]: 2026-03-10T09:55:27.944+0000 7f8f00f1d700 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-10T09:55:28.499 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:28] ENGINE Serving on http://:::9283 2026-03-10T09:55:28.499 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:28 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:09:55:28] ENGINE Bus STARTED 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 podman[121224]: 2026-03-10 09:55:28.227527611 +0000 UTC m=+0.302471958 container died 9b572ece92f49d681a5147f00e6f905dec4d4ca7b10c7458fcfbfe7aadac693f (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, 
name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, GIT_CLEAN=True, ceph=True, io.openshift.expose-services=, distribution-scope=public, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.k8s.display-name=CentOS Stream 8, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, vcs-type=git, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, version=8, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/) 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 podman[121224]: 2026-03-10 09:55:28.244139254 +0000 UTC m=+0.319083601 container remove 9b572ece92f49d681a5147f00e6f905dec4d4ca7b10c7458fcfbfe7aadac693f (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, io.openshift.expose-services=, version=8, RELEASE=HEAD, architecture=x86_64, release=754, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, GIT_BRANCH=HEAD, vcs-type=git, maintainer=Guillaume Abrioux , ceph=True, com.redhat.component=centos-stream-container, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream) 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 bash[121224]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service: Deactivated successfully. 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 systemd[1]: Stopped Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service: Consumed 9.808s CPU time. 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 systemd[1]: Starting Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 podman[121357]: 2026-03-10 09:55:28.605262683 +0000 UTC m=+0.021081424 container create 3fb6e0b8d0c6ff35c9db73ef2bb941a367a97deb79d3206d8e3485e19a38ae71 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223) 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 podman[121357]: 2026-03-10 09:55:28.645216973 +0000 UTC m=+0.061035704 container init 3fb6e0b8d0c6ff35c9db73ef2bb941a367a97deb79d3206d8e3485e19a38ae71 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 podman[121357]: 2026-03-10 09:55:28.651433944 +0000 UTC m=+0.067252686 container start 3fb6e0b8d0c6ff35c9db73ef2bb941a367a97deb79d3206d8e3485e19a38ae71 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, 
FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223) 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 bash[121357]: 3fb6e0b8d0c6ff35c9db73ef2bb941a367a97deb79d3206d8e3485e19a38ae71 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 podman[121357]: 2026-03-10 09:55:28.597344126 +0000 UTC m=+0.013162877 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 systemd[1]: Started Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: pidfile_write: ignore empty --pid-file 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: load: jerasure load: lrc 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: RocksDB version: 7.9.2 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Git sha 0 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: DB SUMMARY 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: DB Session ID: QFCOOS6480WREGQOG6NK 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: CURRENT file: CURRENT 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2089 Bytes 2026-03-10T09:55:28.772 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 1, files: 000042.sst 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000040.log size: 138520 ; 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.error_if_exists: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.create_if_missing: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.paranoid_checks: 1 2026-03-10T09:55:28.773 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.env: 0x55fdd4f92dc0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.info_log: 0x55fdd72e05c0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.statistics: (nil) 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.use_fsync: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_log_file_size: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.allow_fallocate: 1 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.use_direct_reads: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.db_log_dir: 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.wal_dir: 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.WAL_size_limit_MB: 0 
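The journal entries at the top of this excerpt show the per-daemon restart cycle cephadm drives during an upgrade: systemd stops the old ceph-<fsid>@mon.c.service unit, podman removes the old container, and a new container is created, initialized, and started from the target image before ceph-mon comes up inside it. A minimal sketch for reconstructing that timeline from a log like this one (assumptions: one journal entry per line, the exact journalctl prefix format shown here, and a hypothetical file name):

    import re

    # Matches lines such as:
    # 2026-03-10T09:55:28.500 INFO:journalctl@ceph.mon.c.vm00.stdout:... systemd[1]: Stopped Ceph mon.c ...
    EVENT_RE = re.compile(
        r'(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+) '
        r'INFO:journalctl@(?P<daemon>ceph\.[^.]+\.[^.]+)\.'
        r'.*?(?P<event>Stopped Ceph|Starting Ceph|Started Ceph|'
        r'container create|container init|container start|'
        r'container died|container remove)')

    def restart_timeline(lines):
        """Yield (timestamp, daemon, event) for each daemon lifecycle event."""
        for line in lines:
            m = EVENT_RE.search(line)
            if m:
                yield m.group('ts'), m.group('daemon'), m.group('event')

    # Hypothetical usage:
    # with open('teuthology.log') as f:
    #     for ts, daemon, event in restart_timeline(f):
    #         print(ts, daemon, event)

Applied to the entries above, this yields the stop / remove / create / init / start sequence for mon.c in order.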
2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.write_buffer_manager: 0x55fdd72e5900 2026-03-10T09:55:28.773 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.unordered_write: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.row_cache: None 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.wal_filter: None 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.two_write_queues: 0 
2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.wal_compression: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.atomic_flush: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.log_readahead_size: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_background_jobs: 2 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_background_compactions: -1 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_subcompactions: 1 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T09:55:28.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.stats_persist_period_sec: 600 
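At startup ceph-mon dumps RocksDB's effective configuration, one "Options.<name>: <value>" pair per journal entry, as seen throughout this stretch of the log. A small sketch that collects the dump into a dict so two runs can be compared (assuming one journal entry per line, in the format shown here):

    import re

    # e.g. "... ceph-mon[121371]: rocksdb: Options.max_open_files: -1"
    # (some keys print with a space before the colon, e.g. delayed_write_rate)
    OPT_RE = re.compile(r'rocksdb:\s+Options\.(?P<key>[\w.\[\]]+)\s*:\s+(?P<val>\S.*)')

    def rocksdb_options(lines):
        """Collect the startup Options dump into {name: value}."""
        opts = {}
        for line in lines:
            m = OPT_RE.search(line)
            if m:
                opts[m.group('key')] = m.group('val').strip()
        return opts

    # Diffing two runs, e.g. before/after an upgrade (hypothetical file names):
    # a, b = rocksdb_options(open('run_a.log')), rocksdb_options(open('run_b.log'))
    # changed = {k: (a[k], b[k]) for k in a.keys() & b.keys() if a[k] != b[k]}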
2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_open_files: -1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_background_flushes: -1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Compression algorithms supported: 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kZSTD supported: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kXpressCompression supported: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kBZip2Compression supported: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kLZ4Compression supported: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kZlibCompression supported: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: kSnappyCompression supported: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.merge_operator: 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_filter: None 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: 
Options.compaction_filter_factory: None 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55fdd72e05a0) 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: index_type: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_index_type: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: index_shortening: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: checksum: 4 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: no_block_cache: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache: 0x55fdd7305350 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_options: 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: capacity : 536870912 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: num_shard_bits : 4 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: strict_capacity_limit : 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_compressed: (nil) 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: persistent_cache: (nil) 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size: 4096 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size_deviation: 10 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_restart_interval: 16 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: index_block_restart_interval: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: metadata_block_size: 4096 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: partition_filters: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: use_delta_encoding: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: filter_policy: bloomfilter 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: whole_key_filtering: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: verify_compression: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: format_version: 5 2026-03-10T09:55:28.775 
INFO:journalctl@ceph.mon.c.vm00.stdout: enable_index_compression: 1 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: block_align: 0 2026-03-10T09:55:28.775 INFO:journalctl@ceph.mon.c.vm00.stdout: max_auto_readahead_size: 262144 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout: prepopulate_block_cache: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout: initial_auto_readahead_size: 8192 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression: NoCompression 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.num_levels: 7 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 
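Several values in this dump (write_buffer_size: 33554432, compression: NoCompression) are Ceph's tuned monitor defaults rather than stock RocksDB ones; they normally come from the monitor's mon_rocksdb_options config setting. A hedged way to confirm the effective value on a live cluster (assumes an admin keyring is available and that the option keeps its comma-separated key=value format):

    import subprocess

    def mon_rocksdb_options():
        """Return the monitor's RocksDB tuning as a {key: value} dict."""
        out = subprocess.run(
            ['ceph', 'config', 'get', 'mon', 'mon_rocksdb_options'],
            capture_output=True, text=True, check=True)
        return dict(kv.split('=', 1) for kv in out.stdout.strip().split(','))

    # e.g. mon_rocksdb_options().get('write_buffer_size')  ->  '33554432'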
2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: 
Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.inplace_update_support: 0 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: 
Options.inplace_update_num_locks: 10000 2026-03-10T09:55:28.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.bloom_locality: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.max_successive_merges: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.ttl: 2592000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.enable_blob_files: false 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.min_blob_size: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
09:55:28 vm00 ceph-mon[121371]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 44, last_sequence is 29277, log_number is 40,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 40 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 8d22c9c5-d480-4ae9-92e8-040457166903 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136528688037, "job": 1, "event": "recovery_started", "wal_files": [40]} 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #40 mode 2 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136528689402, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 45, "file_size": 93458, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 29282, "largest_seqno": 29318, "table_properties": {"data_size": 92033, "index_size": 264, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 133, "raw_key_size": 679, "raw_average_key_size": 26, "raw_value_size": 91280, "raw_average_value_size": 3510, "num_data_blocks": 11, "num_entries": 26, "num_filter_entries": 26, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773136528, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "8d22c9c5-d480-4ae9-92e8-040457166903", "db_session_id": "QFCOOS6480WREGQOG6NK", "orig_file_number": 45, "seqno_to_time_mapping": "N/A"}} 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136528689469, "job": 1, "event": "recovery_finished"} 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/version_set.cc:5047] Creating manifest 47 2026-03-10T09:55:28.777 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000040.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55fdd7306e00 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: DB pointer 0x55fdd741c000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:28 vm00 ceph-mon[121371]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: ** DB Stats ** 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: L0 1/0 91.27 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 83.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: L6 1/0 11.10 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Sum 2/0 11.19 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 83.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 83.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:28.777 
INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 83.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T09:55:28.777 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative compaction: 0.00 GB write, 6.04 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval compaction: 0.00 GB write, 6.04 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: Block cache BinnedLRUCache@0x55fdd7305350#2 capacity: 512.00 MB usage: 0.53 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 7e-06 secs_since: 0 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.17 KB,3.27826e-05%) IndexBlock(1,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%) 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:28.778 INFO:journalctl@ceph.mon.c.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T09:55:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 systemd[1]: Stopping Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
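Alongside the human-readable lines, RocksDB emits machine-readable EVENT_LOG_v1 records (the recovery_started, table_file_creation, and recovery_finished events above) as literal JSON after the "EVENT_LOG_v1 " marker. A sketch that extracts them, useful for checking what each mon restart replayed from its WAL (same one-entry-per-line assumption; the file name is hypothetical):

    import json
    import re

    EVLOG_RE = re.compile(r'EVENT_LOG_v1 (\{.*\})')

    def rocksdb_events(lines):
        """Yield each EVENT_LOG_v1 payload as a parsed dict."""
        for line in lines:
            m = EVLOG_RE.search(line)
            if m:
                yield json.loads(m.group(1))

    # Hypothetical usage: list the table files written during WAL recovery
    # for ev in rocksdb_events(open('mon.c.journal')):
    #     if ev.get('event') == 'table_file_creation':
    #         print(ev['file_number'], ev['file_size'])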
2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: mon.c calling monitor election 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: mon.a calling monitor election 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: monmap epoch 3 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: min_mon_release 17 (quincy) 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: election_strategy: 1 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: fsmap 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: mgrmap e50: y(active, since 12s), standbys: x 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: overall HEALTH_OK 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: Upgrade: It appears safe to stop mon.c 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:30.661 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: mon.c calling monitor election 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: mon.a calling monitor election 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: monmap epoch 3 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: last_changed 
2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: min_mon_release 17 (quincy) 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: election_strategy: 1 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: fsmap 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: mgrmap e50: y(active, since 12s), standbys: x 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: overall HEALTH_OK 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:30.662 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: Upgrade: It appears safe to stop mon.c 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-mon[121371]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c[121367]: 2026-03-10T09:55:30.420+0000 7fa224a37640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c[121367]: 2026-03-10T09:55:30.420+0000 7fa224a37640 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 podman[121783]: 2026-03-10 09:55:30.584068331 +0000 UTC m=+0.181487369 container died 3fb6e0b8d0c6ff35c9db73ef2bb941a367a97deb79d3206d8e3485e19a38ae71 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 podman[121783]: 2026-03-10 09:55:30.603904953 +0000 UTC m=+0.201323980 container remove 3fb6e0b8d0c6ff35c9db73ef2bb941a367a97deb79d3206d8e3485e19a38ae71 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default) 2026-03-10T09:55:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 bash[121783]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: mon.c calling monitor election 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: mon.a calling monitor election 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: monmap epoch 3 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: min_mon_release 17 (quincy) 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: election_strategy: 1 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: fsmap 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: mgrmap e50: y(active, since 12s), standbys: x 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: overall HEALTH_OK 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: Upgrade: It appears safe to stop mon.c 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:30 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:30.946 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service: 
Deactivated successfully. 2026-03-10T09:55:30.946 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 systemd[1]: Stopped Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:30.946 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 systemd[1]: Starting Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:55:30.947 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:55:30] "GET /metrics HTTP/1.1" 200 35138 "" "Prometheus/2.51.0" 2026-03-10T09:55:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 podman[121892]: 2026-03-10 09:55:30.946910176 +0000 UTC m=+0.017016843 container create 9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-10T09:55:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 podman[121892]: 2026-03-10 09:55:30.982052957 +0000 UTC m=+0.052159624 container init 9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:55:31.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 podman[121892]: 2026-03-10 09:55:30.985884811 +0000 UTC m=+0.055991468 container start 9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, 
org.label-schema.license=GPLv2) 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 bash[121892]: 9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 podman[121892]: 2026-03-10 09:55:30.940121694 +0000 UTC m=+0.010228361 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:30 vm00 systemd[1]: Started Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: pidfile_write: ignore empty --pid-file 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: load: jerasure load: lrc 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: RocksDB version: 7.9.2 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Git sha 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: DB SUMMARY 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: DB Session ID: M74I2DPF7O401131DZSO 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: CURRENT file: CURRENT 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: MANIFEST file: MANIFEST-000047 size: 263 Bytes 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 2, files: 000042.sst 000045.sst 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000046.log size: 445432 ; 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.error_if_exists: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.create_if_missing: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.paranoid_checks: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T09:55:31.372 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.env: 0x5632adbb7dc0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.info_log: 0x5632afdc17e0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.statistics: (nil) 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.use_fsync: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_log_file_size: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.allow_fallocate: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.use_direct_reads: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.db_log_dir: 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.wal_dir: 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: 
Options.manifest_preallocation_size: 4194304 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.write_buffer_manager: 0x5632afdc5900 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T09:55:31.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.unordered_write: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.row_cache: None 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.wal_filter: None 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.two_write_queues: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.wal_compression: 0 
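[The records above capture one pass of cephadm's staggered mon restart: mgr.y dispatches "mon ok-to-stop" for mon.c, the leader answers "Upgrade: It appears safe to stop mon.c", the old container receives SIGTERM from podman-init and is removed, mon.a and mon.c call elections with mon.a re-elected leader and a,c,b reported in quorum, and systemd relaunches mon.c from the target image. The same quorum gate can be exercised by hand; a minimal sketch, assuming a `ceph` CLI on PATH with working admin credentials (the helper name and default mon id are illustrative only):

#!/usr/bin/env python3
"""Exercise the ok-to-stop gate that precedes each mon restart."""
import subprocess
import sys

def mon_ok_to_stop(mon_id: str) -> bool:
    # Same command the mgr dispatches in the log:
    #   cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]
    # The CLI exits non-zero when stopping this mon would break quorum.
    result = subprocess.run(
        ["ceph", "mon", "ok-to-stop", mon_id],
        capture_output=True, text=True,
    )
    return result.returncode == 0

if __name__ == "__main__":
    mon = sys.argv[1] if len(sys.argv) > 1 else "c"
    if mon_ok_to_stop(mon):
        print(f"safe to stop mon.{mon}")
    else:
        print(f"NOT safe to stop mon.{mon}; leave it running")
]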
2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.atomic_flush: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.log_readahead_size: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_background_jobs: 2 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_background_compactions: -1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_subcompactions: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_open_files: 
-1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_background_flushes: -1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Compression algorithms supported: 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kZSTD supported: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kXpressCompression supported: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kBZip2Compression supported: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kLZ4Compression supported: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kZlibCompression supported: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: kSnappyCompression supported: 1 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000047 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.merge_operator: 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_filter: None 2026-03-10T09:55:31.373 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: 
Options.memtable_factory: SkipListFactory 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5632afdc1440) 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: index_type: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_index_type: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: index_shortening: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: checksum: 4 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: no_block_cache: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache: 0x5632afde49b0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_options: 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: capacity : 536870912 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: num_shard_bits : 4 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: strict_capacity_limit : 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_compressed: (nil) 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: persistent_cache: (nil) 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size: 4096 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size_deviation: 10 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_restart_interval: 16 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: index_block_restart_interval: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: metadata_block_size: 4096 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: partition_filters: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: use_delta_encoding: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: filter_policy: bloomfilter 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: whole_key_filtering: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: verify_compression: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: format_version: 5 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_index_compression: 1 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: block_align: 0 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: max_auto_readahead_size: 262144 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: prepopulate_block_cache: 0 
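[The store-open Options dump continues below this point (the last of the indented table_factory sub-options, then more flat Options.* records). Every "rocksdb: Options.<name>: <value>" record is a key/value pair, which makes the dump easy to scrape from a flowed capture like this one, for example to diff the options of two daemon starts. A rough sketch; the log file name is hypothetical, and the regex targets only the flat Options.* records, skipping keys with empty values such as Options.db_log_dir:

import re
from pathlib import Path

# Matches e.g. "... ceph-mon[121907]: rocksdb: Options.write_buffer_size: 33554432".
# The value ends at the next teuthology timestamp (flowed capture) or at end of
# line; the negative lookahead skips keys whose value is empty, where the next
# token would otherwise be mistaken for the value.
OPT_RE = re.compile(
    r"rocksdb: (Options\.[A-Za-z0-9_.\[\]]+)\s*:\s*"
    r"((?!\d{4}-\d{2}-\d{2}T)\S.*?)(?=\s+\d{4}-\d{2}-\d{2}T|\s*$)",
    re.MULTILINE,
)

def scrape_rocksdb_options(log_text: str) -> dict[str, str]:
    """Collect the Options.* pairs a ceph-mon prints at DB open."""
    return {m.group(1): m.group(2) for m in OPT_RE.finditer(log_text)}

opts = scrape_rocksdb_options(Path("teuthology.log").read_text())  # hypothetical copy
print(opts.get("Options.write_buffer_size"))  # "33554432" for the dump shown here
]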
2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: initial_auto_readahead_size: 8192 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression: NoCompression 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T09:55:31.374 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.num_levels: 7 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T09:55:31.375 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: 
Options.max_sequential_skip_in_iterations: 8 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.inplace_update_support: 0 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T09:55:31.375 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: 
Options.memtable_whole_key_filtering: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.bloom_locality: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.max_successive_merges: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.ttl: 2592000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.enable_blob_files: false 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.min_blob_size: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/version_set.cc:4390] More existing levels 
in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000047 succeeded,manifest_file_number is 47, next_file_number is 49, last_sequence is 29318, log_number is 41,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 41 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 41 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 8d22c9c5-d480-4ae9-92e8-040457166903 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136531013693, "job": 1, "event": "recovery_started", "wal_files": [46]} 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #46 mode 2 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136531016217, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 50, "file_size": 253375, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 29323, "largest_seqno": 29449, "table_properties": {"data_size": 251615, "index_size": 535, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 197, "raw_key_size": 1818, "raw_average_key_size": 25, "raw_value_size": 249863, "raw_average_value_size": 3519, "num_data_blocks": 23, "num_entries": 71, "num_filter_entries": 71, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773136531, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "8d22c9c5-d480-4ae9-92e8-040457166903", "db_session_id": "M74I2DPF7O401131DZSO", "orig_file_number": 50, "seqno_to_time_mapping": "N/A"}} 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136531016353, "job": 1, "event": "recovery_finished"} 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/version_set.cc:5047] Creating manifest 52 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
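[mon.c's store recovery shows up above as three EVENT_LOG_v1 records: "recovery_started" (WAL 46), "table_file_creation" (file 50, 253375 bytes), and "recovery_finished". The payload after the EVENT_LOG_v1 marker is plain JSON, so it can be lifted straight out of the journal text. A small standard-library sketch; raw_decode stops at the end of each complete object, which copes both with nested objects like "table_properties" and with the next record's prefix glued on behind the payload:

import json

MARKER = "EVENT_LOG_v1 "

def iter_rocksdb_events(log_text: str):
    """Yield the JSON payloads of RocksDB EVENT_LOG_v1 records."""
    dec = json.JSONDecoder()
    pos = log_text.find(MARKER)
    while pos != -1:
        try:
            obj, _end = dec.raw_decode(log_text, pos + len(MARKER))
            yield obj
        except json.JSONDecodeError:
            pass  # truncated payload at the edge of a capture; skip it
        pos = log_text.find(MARKER, pos + 1)

# Against the records above this yields, in order, the events
# "recovery_started", "table_file_creation" and "recovery_finished".
]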
2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000046.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5632afde6e00 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: DB pointer 0x5632afef8000 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: ** DB Stats ** 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: L0 2/0 338.70 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 120.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: L6 1/0 11.10 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T09:55:31.376 INFO:journalctl@ceph.mon.c.vm00.stdout: Sum 3/0 11.43 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 120.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 120.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) 
Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 120.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative compaction: 0.00 GB write, 29.29 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval compaction: 0.00 GB write, 29.29 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Block cache BinnedLRUCache@0x5632afde49b0#2 capacity: 512.00 MB usage: 1.41 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.1e-05 secs_since: 0 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.41 KB,7.7486e-05%) IndexBlock(2,1.00 KB,0.000190735%) Misc(1,0.00 KB,0%) 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: starting mon.c rank 1 at public addrs [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] at bind addrs [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???) 
e3 preinit fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).mds e1 new map 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).mds e1 print_map 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: e1 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: btime 1970-01-01T00:00:00.000000+0000 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: legacy client fscid: -1 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout: No filesystems configured 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).osd e96 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T09:55:31.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:31 vm00 ceph-mon[121907]: mon.c@-1(???).paxosservice(auth 1..24) refresh upgraded, format 0 -> 3 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: mon.c calling monitor election 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: mon.a calling monitor election 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: monmap epoch 3 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: min_mon_release 17 (quincy) 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: election_strategy: 1 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:32.621
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: fsmap 2026-03-10T09:55:32.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: mgrmap e50: y(active, since 14s), standbys: x 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: overall HEALTH_OK 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: mon.c calling monitor election 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: mon.a calling monitor election 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: monmap epoch 3 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:32.623 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: min_mon_release 17 (quincy) 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: election_strategy: 1 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: fsmap 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: mgrmap e50: y(active, since 14s), standbys: x 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: overall HEALTH_OK 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: mon.c calling monitor election 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: mon.a calling monitor election 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: monmap epoch 3 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: last_changed 2026-03-10T09:46:48.290879+0000 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: created 2026-03-10T09:46:01.447541+0000 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: min_mon_release 17 (quincy) 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: election_strategy: 1 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: fsmap 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: osdmap 
e96: 8 total, 8 up, 8 in 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: mgrmap e50: y(active, since 14s), standbys: x 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: overall HEALTH_OK 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.798 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 
ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:32.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:32 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:33.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:33.621 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.621 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:33.530Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:33.621 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:33.530Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:33.621 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:33.531Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:33.621 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:33.531Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 8 op/s
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:33.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:33 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:34 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:34.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.375 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:35 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:36.573 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:36.574 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-mon[50536]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.862 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:36.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:36.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:36.868 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 systemd[1]: Stopping Ceph mon.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
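The dispatch storm above is the upgrade loop on mgr.y polling versions, config dump, and config generate-minimal-conf against each mon, and the mon ok-to-stop b checks are the safety gate it passes immediately before mon.b is stopped below. A minimal sketch for pulling that sequence out of a log slice like this one (the cmd=[{"prefix": ...}] shape is what ceph-mon logs; the file path and helper name are illustrative assumptions):

```python
import re
from collections import Counter

# Tally which mon commands mgr.y dispatched in a log slice like the one
# above. CMD_RE matches the cmd=[{"prefix": "..."}] shape ceph-mon logs;
# "teuthology.log" is an assumed input path.
CMD_RE = re.compile(r'cmd=\[\{"prefix": "([^"]+)"')

def tally_dispatches(path):
    counts = Counter()
    with open(path) as f:
        for line in f:
            m = CMD_RE.search(line)
            if m:
                counts[m.group(1)] += 1
    return counts

if __name__ == "__main__":
    for prefix, n in tally_dispatches("teuthology.log").most_common():
        print(f"{n:6d}  {prefix}")
```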
2026-03-10T09:55:36.868 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b[50532]: 2026-03-10T09:55:36.646+0000 7ffa2f5bf700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:55:36.868 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b[50532]: 2026-03-10T09:55:36.646+0000 7ffa2f5bf700 -1 mon.b@2(peon) e3 *** Got Signal Terminated ***
2026-03-10T09:55:37.133 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
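The CephNodeDiskspaceWarning failure above is a many-to-many join: node_uname_info exists twice for instance="vm03", once with the new cluster label and once without (a stale series from the pre-upgrade node-exporter), so on (instance) group_left (nodename) has no unique right-hand side. A toy reproduction of the collision (the label sets are copied from the error; the rule rewrite in the comment is a suggestion, not what the shipped alert does):

```python
from collections import Counter

# Two node_uname_info label sets share instance="vm03"; only one carries
# the "cluster" label. Any on(instance) group_left join then fails exactly
# as Prometheus reports above. One possible (unverified) rule fix is to
# make the right side unique first, e.g.
#   ... * on (instance) group_left (nodename)
#       max by (instance, nodename) (node_uname_info)
right_hand_side = [
    {"instance": "vm03", "nodename": "vm03",
     "cluster": "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"},
    {"instance": "vm03", "nodename": "vm03"},
]
per_instance = Counter(s["instance"] for s in right_hand_side)
print([k for k, n in per_instance.items() if n > 1])  # ['vm03'] -> join rejected
```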
2026-03-10T09:55:37.133 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 podman[123636]: 2026-03-10 09:55:36.867764373 +0000 UTC m=+0.235715214 container died 6c31bf149b5f9c2b55445d5515a764ef46cf2796aae197e0c0b647acf711f74d (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b, distribution-scope=public, build-date=2022-05-03T08:36:31.336870, ceph=True, GIT_CLEAN=True, RELEASE=HEAD, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., vcs-type=git, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, maintainer=Guillaume Abrioux , release=754)
2026-03-10T09:55:37.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 podman[123636]: 2026-03-10 09:55:36.888088634 +0000 UTC m=+0.256039475 container remove 6c31bf149b5f9c2b55445d5515a764ef46cf2796aae197e0c0b647acf711f74d (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, maintainer=Guillaume Abrioux , version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, release=754, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, ceph=True, GIT_CLEAN=True)
2026-03-10T09:55:37.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 bash[123636]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b
2026-03-10T09:55:37.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.b.service: Deactivated successfully.
2026-03-10T09:55:37.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 systemd[1]: Stopped Ceph mon.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:55:37.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:36 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.b.service: Consumed 10.707s CPU time.
2026-03-10T09:55:37.134 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 systemd[1]: Starting Ceph mon.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
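Between the two systemd messages above, podman removes the old v17.2.0 container (image quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a) and the unit is restarted from the target squid build, which is the actual image swap for mon.b. A rough sketch for reconstructing that per-daemon timeline from the podman events in a log like this one (the event vocabulary matches the lines above; the path and function name are assumptions):

```python
import re

# Podman logs "container <event> <id> (image=..., name=...)" lines; pull
# them out in order to see each daemon's stop/start image swap.
EVENT_RE = re.compile(r"container (create|init|start|died|remove) "
                      r"([0-9a-f]{12,}) \(image=([^,)]+)")

def container_timeline(path):
    """Yield (event, short_id, image) tuples in log order."""
    with open(path) as f:
        for line in f:
            m = EVENT_RE.search(line)
            if m:
                event, cid, image = m.groups()
                yield event, cid[:12], image

# For mon.b this should show died/remove on the quay.io/ceph/ceph (v17.2.0)
# container followed by create/init/start on
# quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df,
# i.e. the upgrade's image swap.
```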
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 podman[123746]: 2026-03-10 09:55:37.238969781 +0000 UTC m=+0.020246057 container create 8f6ea28bf3911ebf1f815913d854a19dbe1706e1c71ffde48f50cbd9f4b0dcd1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 podman[123746]: 2026-03-10 09:55:37.280627579 +0000 UTC m=+0.061903864 container init 8f6ea28bf3911ebf1f815913d854a19dbe1706e1c71ffde48f50cbd9f4b0dcd1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team )
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 podman[123746]: 2026-03-10 09:55:37.283741233 +0000 UTC m=+0.065017509 container start 8f6ea28bf3911ebf1f815913d854a19dbe1706e1c71ffde48f50cbd9f4b0dcd1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0)
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 bash[123746]: 8f6ea28bf3911ebf1f815913d854a19dbe1706e1c71ffde48f50cbd9f4b0dcd1
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 podman[123746]: 2026-03-10 09:55:37.229498912 +0000 UTC m=+0.010775198 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 systemd[1]: Started Ceph mon.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: set uid:gid to 167:167 (ceph:ceph)
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: pidfile_write: ignore empty --pid-file
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: load: jerasure load: lrc
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: RocksDB version: 7.9.2
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Git sha 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Compile date 2026-02-25 18:11:04
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: DB SUMMARY
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: DB Session ID: LH6IYNTW3O6XDRJZU03C
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: CURRENT file: CURRENT
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: IDENTITY file: IDENTITY
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2089 Bytes
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000042.sst
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000040.log size: 1444466 ;
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.error_if_exists: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.create_if_missing: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.paranoid_checks: 1
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.flush_verify_memtable_count: 1
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.env: 0x55ab21c4adc0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.fs: PosixFileSystem
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.info_log: 0x55ab238cd7e0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_file_opening_threads: 16
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.statistics: (nil)
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.use_fsync: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_log_file_size: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.log_file_time_to_roll: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.keep_log_file_num: 1000
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.recycle_log_file_num: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.allow_fallocate: 1
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.allow_mmap_reads: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.allow_mmap_writes: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.use_direct_reads: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.create_missing_column_families: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.db_log_dir:
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.wal_dir:
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.table_cache_numshardbits: 6
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.WAL_ttl_seconds: 0
2026-03-10T09:55:37.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.WAL_size_limit_MB: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.is_fd_close_on_exec: 1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.advise_random_on_open: 1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.db_write_buffer_size: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.write_buffer_manager: 0x55ab238d1900
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.use_adaptive_mutex: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.rate_limiter: (nil)
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.wal_recovery_mode: 2
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.enable_thread_tracking: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.enable_pipelined_write: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.unordered_write: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.row_cache: None
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.wal_filter: None
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.allow_ingest_behind: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.two_write_queues: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.manual_wal_flush: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.wal_compression: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.atomic_flush: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.persist_stats_to_disk: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.write_dbid_to_manifest: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.log_readahead_size: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.best_efforts_recovery: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.allow_data_in_errors: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.db_host_id: __hostname__
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.enforce_single_del_contracts: true
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_background_jobs: 2
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_background_compactions: -1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_subcompactions: 1
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.delayed_write_rate : 16777216
2026-03-10T09:55:37.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_total_wal_size: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.stats_dump_period_sec: 600
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.stats_persist_period_sec: 600
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_open_files: -1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bytes_per_sync: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.wal_bytes_per_sync: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.strict_bytes_per_sync: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_readahead_size: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_background_flushes: -1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Compression algorithms supported:
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kZSTD supported: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kXpressCompression supported: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kBZip2Compression supported: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kLZ4Compression supported: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kZlibCompression supported: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kLZ4HCCompression supported: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: kSnappyCompression supported: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Fast CRC32 supported: Supported on x86
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: DMutex implementation: pthread_mutex_t
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.merge_operator:
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_filter: None
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_filter_factory: None
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.sst_partitioner_factory: None
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.memtable_factory: SkipListFactory
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.table_factory: BlockBasedTable
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55ab238cd440)
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: cache_index_and_filter_blocks: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: cache_index_and_filter_blocks_with_high_priority: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: pin_top_level_index_and_filter: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: index_type: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: data_block_index_type: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: index_shortening: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: data_block_hash_table_util_ratio: 0.750000
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: checksum: 4
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: no_block_cache: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_cache: 0x55ab238f09b0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_cache_name: BinnedLRUCache
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_cache_options:
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: capacity : 536870912
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: num_shard_bits : 4
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: strict_capacity_limit : 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: high_pri_pool_ratio: 0.000
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_cache_compressed: (nil)
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: persistent_cache: (nil)
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_size: 4096
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_size_deviation: 10
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: block_restart_interval: 16
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: index_block_restart_interval: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: metadata_block_size: 4096
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: partition_filters: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: use_delta_encoding: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: filter_policy: bloomfilter
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: whole_key_filtering: 1
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: verify_compression: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: read_amp_bytes_per_bit: 0
2026-03-10T09:55:37.550 INFO:journalctl@ceph.mon.b.vm03.stdout: format_version: 5
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout: enable_index_compression: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout: block_align: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout: max_auto_readahead_size: 262144
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout: prepopulate_block_cache: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout: initial_auto_readahead_size: 8192
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout: num_file_reads_for_auto_readahead: 2
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.write_buffer_size: 33554432
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_write_buffer_number: 2
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression: NoCompression
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression: Disabled
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.prefix_extractor: nullptr
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.num_levels: 7
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.window_bits: -14
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.level: 32767
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.strategy: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.enabled: false
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.target_file_size_base: 67108864
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.target_file_size_multiplier: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.arena_block_size: 1048576
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.disable_auto_compactions: 0
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-10T09:55:37.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.inplace_update_support: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.memtable_huge_page_size: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.bloom_locality: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.max_successive_merges: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.optimize_filters_for_hits: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.paranoid_file_checks: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.force_consistency_checks: 1
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.report_bg_io_stats: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.ttl: 2592000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.periodic_compaction_seconds: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.enable_blob_files: false
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.min_blob_size: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.blob_file_size: 268435456
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.blob_compression_type: NoCompression
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.enable_blob_garbage_collection: false
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.blob_file_starting_level: 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 44, last_sequence is 29147, log_number is 40,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 40
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 75f185b3-e1f7-4036-bc34-0329023ae5ec
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136537307633, "job": 1, "event": "recovery_started", "wal_files": [40]}
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #40 mode 2
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136537314239, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 45, "file_size": 810738, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 29152, "largest_seqno": 29982, "table_properties": {"data_size": 806846, "index_size": 1705, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1157, "raw_key_size": 10714, "raw_average_key_size": 24, "raw_value_size": 798883, "raw_average_value_size": 1832, "num_data_blocks": 74, "num_entries": 436, "num_filter_entries": 436, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773136537, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "75f185b3-e1f7-4036-bc34-0329023ae5ec", "db_session_id": "LH6IYNTW3O6XDRJZU03C", "orig_file_number": 45, "seqno_to_time_mapping": "N/A"}}
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773136537314327, "job": 1, "event": "recovery_finished"}
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/version_set.cc:5047] Creating manifest 47
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000040.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55ab238f2e00
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: DB pointer 0x55ab23a04000
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: ** DB Stats **
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: ** Compaction Stats [default] **
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: L0 1/0 791.74 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 152.5 0.01 0.00 1 0.005 0 0 0.0 0.0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: L6 1/0 11.10 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Sum 2/0 11.87 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 152.5 0.01 0.00 1 0.005 0 0 0.0 0.0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 152.5 0.01 0.00 1 0.005 0 0 0.0 0.0
2026-03-10T09:55:37.552 INFO:journalctl@ceph.mon.b.vm03.stdout:
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: ** Compaction Stats [default] **
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 152.5 0.01 0.00 1 0.005 0 0 0.0 0.0
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Flush(GB): cumulative 0.001, interval 0.001
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Cumulative compaction: 0.00 GB write, 75.41 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Interval compaction: 0.00 GB write, 75.41 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Block cache BinnedLRUCache@0x55ab238f09b0#2 capacity: 512.00 MB usage: 3.11 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.2e-05 secs_since: 0
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,1.28 KB,0.000244379%) IndexBlock(1,1.83 KB,0.000348687%) Misc(1,0.00 KB,0%)
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: starting mon.b rank 2 at public addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] at bind addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???) e3 preinit fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).mds e1 new map
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).mds e1 print_map
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: e1
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: btime 1970-01-01T00:00:00:000000+0000
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: legacy client fscid: -1
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout: No filesystems configured
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).osd e96 crush map has features 3314933000854323200, adjusting msgr requires
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires
2026-03-10T09:55:37.553 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:37 vm03 ceph-mon[123760]: mon.b@-1(???).paxosservice(auth 1..25) refresh upgraded, format 0 -> 3
2026-03-10T09:55:41.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:55:40] "GET /metrics HTTP/1.1" 200 37884 "" "Prometheus/2.51.0"
2026-03-10T09:55:43.073 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 7s ago 9m - -
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 starting - - - -
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (3m) 7s ago 7m 26.5M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (3m) 24s ago 6m 47.0M - dad864ee21e9 011f2081bf92
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (91s) 7s ago 6m 47.5M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (37s) 24s ago 8m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (2m) 7s ago 9m 549M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (32s) 7s ago 9m 49.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 starting - - - 2048M
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (12s) 7s ago 9m 25.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (3m) 7s ago 7m 9601k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (3m) 24s ago 7m 9927k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (8m) 7s ago 8m 57.6M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (8m) 7s ago 8m 54.4M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (8m) 7s ago 8m 50.4M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (8m) 7s ago 8m 51.6M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (8m) 24s ago 8m 54.6M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (7m) 24s ago 7m 53.6M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (7m) 24s ago 7m 51.5M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (7m) 24s ago 7m 51.7M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (93s) 24s ago 7m 43.9M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (6m) 7s ago 6m 93.8M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (6m) 24s ago 6m 94.9M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (6m) 7s ago 6m 94.7M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:55:43.612 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (6m) 24s ago 6m 92.0M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:55:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:43.531Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:43.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:43.532Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:43.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:43.532Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:43.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:43.533Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 12,
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:55:43.926 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:55:44.173 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:55:44.187 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:55:44.187 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:55:44.187 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:55:44.187 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:55:44.187 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [
2026-03-10T09:55:44.187 INFO:teuthology.orchestra.run.vm00.stdout: "mgr"
2026-03-10T09:55:44.188 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:55:44.188 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "4/23 daemons upgraded",
2026-03-10T09:55:44.188 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading mon daemons",
2026-03-10T09:55:44.188 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:55:44.188 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:55:44.508 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='client.34131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='client.34134 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mon.a calling monitor election
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: monmap epoch 3
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: last_changed 2026-03-10T09:46:48.290879+0000
2026-03-10T09:55:44.767 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: created 2026-03-10T09:46:01.447541+0000
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: min_mon_release 17 (quincy)
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: election_strategy: 1
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: fsmap
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: osdmap e96: 8 total, 8 up, 8 in
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mgrmap e50: y(active, since 26s), standbys: x
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: overall HEALTH_OK
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mon.c calling monitor election
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mon.a calling monitor election
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='client.34140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mon.b calling monitor election
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: monmap epoch 4
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: last_changed 2026-03-10T09:55:43.577890+0000
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: created 2026-03-10T09:46:01.447541+0000
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: min_mon_release 19 (squid)
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: election_strategy: 1
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: fsmap
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: osdmap e96: 8 total, 8 up, 8 in
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: mgrmap e50: y(active, since 26s), standbys: x
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: overall HEALTH_OK
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2997844070' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.768 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:44 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/1712818523' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='client.34131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='client.34134 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mon.a calling monitor election
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: monmap epoch 3
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: last_changed 2026-03-10T09:46:48.290879+0000
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: created 2026-03-10T09:46:01.447541+0000
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: min_mon_release 17 (quincy)
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: election_strategy: 1
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: fsmap
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: osdmap e96: 8 total, 8 up, 8 in
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mgrmap e50: y(active, since 26s), standbys: x
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: overall HEALTH_OK
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mon.c calling monitor election
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mon.a calling monitor election
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='client.34140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mon.b calling monitor election
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: monmap epoch 4
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: last_changed 2026-03-10T09:55:43.577890+0000
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: created 2026-03-10T09:46:01.447541+0000
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: min_mon_release 19 (squid)
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: election_strategy: 1
2026-03-10T09:55:44.853 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='client.34131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='client.34134 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mon.a calling monitor election
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: monmap epoch 3
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: last_changed 2026-03-10T09:46:48.290879+0000
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: created 2026-03-10T09:46:01.447541+0000
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: min_mon_release 17 (quincy)
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: election_strategy: 1
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: fsmap
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: osdmap e96: 8 total, 8 up, 8 in
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mgrmap e50: y(active, since 26s), standbys: x
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: overall HEALTH_OK
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mon.c calling monitor election
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mon.a calling monitor election
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='client.34140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mon.b calling monitor election
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2)
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: monmap epoch 4
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: last_changed 2026-03-10T09:55:43.577890+0000
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: created 2026-03-10T09:46:01.447541+0000
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: min_mon_release 19 (squid)
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: election_strategy: 1
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: fsmap
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: osdmap e96: 8 total, 8 up, 8 in
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: mgrmap e50: y(active, since 26s), standbys: x
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: overall HEALTH_OK
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2997844070' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T09:55:44.854 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/1712818523' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: 2: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.b
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: fsmap
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: osdmap e96: 8 total, 8 up, 8 in
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: mgrmap e50: y(active, since 26s), standbys: x
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: overall HEALTH_OK
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='client.?
192.168.123.100:0/2997844070' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:44.855 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:44 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/1712818523' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Reconfiguring mon.a (monmap changed)... 2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Reconfiguring daemon mon.a on vm00 2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Reconfiguring mgr.y (monmap changed)... 
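The two monmap dumps above bracket the mon upgrade step: the first election still reports monmap epoch 3 with min_mon_release 17 (quincy), while the re-election moments later reports epoch 4 with min_mon_release 19 (squid), after which cephadm reconfigures every daemon because the monmap changed. A minimal sketch for pulling these transitions out of a teuthology log; the helper name and the log path are illustrative, not part of teuthology:

import re

# Matches the monmap fields ceph-mon prints during an election, as seen in the
# journalctl lines above (e.g. "monmap epoch 4", "min_mon_release 19 (squid)").
EPOCH_RE = re.compile(r"ceph-mon\[\d+\]: monmap epoch (\d+)")
RELEASE_RE = re.compile(r"ceph-mon\[\d+\]: min_mon_release (\d+) \((\w+)\)")

def monmap_transitions(path):
    """Yield (epoch, min_mon_release, release_name) tuples in log order."""
    epoch = None
    with open(path) as log:
        for line in log:
            m = EPOCH_RE.search(line)
            if m:
                epoch = int(m.group(1))
                continue
            m = RELEASE_RE.search(line)
            if m and epoch is not None:
                yield epoch, int(m.group(1)), m.group(2)
                epoch = None

# e.g. list(monmap_transitions("teuthology.log")) over this excerpt yields
# (3, 17, 'quincy') followed by (4, 19, 'squid') for each mon journal.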
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Reconfiguring daemon mgr.y on vm00
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='client.34152 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Reconfiguring mon.c (monmap changed)...
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: Reconfiguring daemon mon.c on vm00
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.776 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Reconfiguring mon.a (monmap changed)...
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Reconfiguring daemon mon.a on vm00
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Reconfiguring mgr.y (monmap changed)...
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Reconfiguring daemon mgr.y on vm00
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='client.34152 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Reconfiguring mon.c (monmap changed)...
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: Reconfiguring daemon mon.c on vm00
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T09:55:45.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Reconfiguring mon.a (monmap changed)...
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Reconfiguring daemon mon.a on vm00
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Reconfiguring mgr.y (monmap changed)...
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Reconfiguring daemon mgr.y on vm00
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='client.34152 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Reconfiguring mon.c (monmap changed)...
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: Reconfiguring daemon mon.c on vm00
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T09:55:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring osd.0 (monmap changed)...
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring daemon osd.0 on vm00
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring osd.1 (monmap changed)...
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring daemon osd.1 on vm00
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring osd.2 (monmap changed)...
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring daemon osd.2 on vm00
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring osd.3 (monmap changed)...
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: Reconfiguring daemon osd.3 on vm00
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring osd.0 (monmap changed)...
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring daemon osd.0 on vm00
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring osd.1 (monmap changed)...
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring daemon osd.1 on vm00
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring osd.2 (monmap changed)...
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring daemon osd.2 on vm00
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring osd.3 (monmap changed)...
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: Reconfiguring daemon osd.3 on vm00
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:46.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring osd.0 (monmap changed)...
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring daemon osd.0 on vm00
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring osd.1 (monmap changed)...
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring daemon osd.1 on vm00
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring osd.2 (monmap changed)...
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring daemon osd.2 on vm00
2026-03-10T09:55:46.925 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring osd.3 (monmap changed)...
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: Reconfiguring daemon osd.3 on vm00
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T09:55:46.926 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:47.196 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:55:47.600 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:55:47.329+0000 7f8211476640 -1 mgr.server handle_report got status from non-daemon mon.b
2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: Reconfiguring rgw.foo.vm00.gcwrcv (monmap changed)...
2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: Reconfiguring daemon rgw.foo.vm00.gcwrcv on vm00
2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: Reconfiguring rgw.smpl.vm00.tpyqjn (monmap changed)...
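The CephNodeDiskspaceWarning failure above is a PromQL join error rather than a Ceph problem: node_uname_info exists twice for instance "vm03", once with a cluster label and once without, so the "on (instance) group_left (nodename)" vector match is many-to-many. A minimal sketch of the ambiguity in Python, assuming only the two label sets quoted in the error message; the function name is illustrative:

from collections import defaultdict

# The two node_uname_info series from the rule-evaluation error above; they
# differ only in the optional "cluster" label, so grouping on (instance)
# alone is ambiguous and Prometheus rejects the group_left join.
series = [
    {"instance": "vm03", "nodename": "vm03",
     "cluster": "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"},
    {"instance": "vm03", "nodename": "vm03"},
]

def match_groups(series, on=("instance",)):
    """Group series by the labels named in `on`, mirroring PromQL's on()."""
    groups = defaultdict(list)
    for labels in series:
        groups[tuple(labels.get(k) for k in on)].append(labels)
    return groups

for key, members in match_groups(series).items():
    if len(members) > 1:
        # Exactly the "found duplicate series for the match group" condition
        # reported by the rule manager.
        print(f"duplicate series for match group {key}: {len(members)} members")

A common remedy (an assumption, not taken from this log) is to deduplicate the scrape targets so node_uname_info is collected once per host, or to aggregate the right-hand side of the join, e.g. max by (instance, nodename) (node_uname_info).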
2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: Reconfiguring daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: Reconfiguring mon.b (monmap changed)... 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: Reconfiguring daemon mon.b on vm03 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:55:47.788 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: Reconfiguring rgw.foo.vm00.gcwrcv (monmap changed)... 
2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: Reconfiguring daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: Reconfiguring rgw.smpl.vm00.tpyqjn (monmap changed)... 2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: Reconfiguring daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: Reconfiguring mon.b (monmap changed)... 2026-03-10T09:55:47.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: Reconfiguring daemon mon.b on vm03 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 
2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: Reconfiguring rgw.foo.vm00.gcwrcv (monmap changed)... 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: Reconfiguring daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: Reconfiguring rgw.smpl.vm00.tpyqjn (monmap changed)... 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: Reconfiguring daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: Reconfiguring mon.b (monmap changed)... 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: Reconfiguring daemon mon.b on vm03 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:55:47.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring daemon mgr.x on vm03 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring daemon osd.4 on vm03 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring daemon osd.5 on vm03 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring osd.6 (monmap changed)... 
2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: Reconfiguring daemon osd.6 on vm03 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T09:55:48.685 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring daemon mgr.x on vm03 2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring osd.4 (monmap changed)... 
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring daemon osd.4 on vm03
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring osd.5 (monmap changed)...
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring daemon osd.5 on vm03
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring osd.6 (monmap changed)...
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: Reconfiguring daemon osd.6 on vm03
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring mgr.x (monmap changed)...
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring daemon mgr.x on vm03
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring osd.4 (monmap changed)...
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring daemon osd.4 on vm03
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring osd.5 (monmap changed)...
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring daemon osd.5 on vm03
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring osd.6 (monmap changed)...
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: Reconfiguring daemon osd.6 on vm03
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T09:55:48.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: Reconfiguring osd.7 (monmap changed)...
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: Reconfiguring daemon osd.7 on vm03
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: Reconfiguring rgw.foo.vm03.smqfat (monmap changed)...
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: Reconfiguring daemon rgw.foo.vm03.smqfat on vm03
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: Reconfiguring rgw.smpl.vm03.evsibt (monmap changed)...
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: Reconfiguring daemon rgw.smpl.vm03.evsibt on vm03
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: Reconfiguring osd.7 (monmap changed)...
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: Reconfiguring daemon osd.7 on vm03
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: Reconfiguring rgw.foo.vm03.smqfat (monmap changed)...
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: Reconfiguring daemon rgw.foo.vm03.smqfat on vm03
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: Reconfiguring rgw.smpl.vm03.evsibt (monmap changed)...
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: Reconfiguring daemon rgw.smpl.vm03.evsibt on vm03
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:49.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: Reconfiguring osd.7 (monmap changed)...
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: Reconfiguring daemon osd.7 on vm03
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: Reconfiguring rgw.foo.vm03.smqfat (monmap changed)...
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: Reconfiguring daemon rgw.foo.vm03.smqfat on vm03
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: Reconfiguring rgw.smpl.vm03.evsibt (monmap changed)...
2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: Reconfiguring daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.919 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": 
"auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:49.920 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.860 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.860 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.860 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.861 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.862 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.862 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.862 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 
ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:50.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:50.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:55:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:55:50] "GET /metrics HTTP/1.1" 200 37958 "" "Prometheus/2.51.0"
2026-03-10T09:55:52.099 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:52.099 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:52.099 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:52.099 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:52.099 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:52.100 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:52.100 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:52.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:52.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:52.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:52.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:52.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:52.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:52.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:52.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:52.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:52.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:52.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:52.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:52.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:52.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:53.026 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:53.026 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:53.026 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:53.026 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:53.026 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:53.026 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:53.027 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:52 vm03 ceph-mon[123760]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:53.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:53.539Z caller=dispatch.go:352 level=error component=dispatcher 
msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:53.539Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:53.540Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:53.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:55:53.540Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:55:54.124 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.124 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.125 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:54.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:54.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.060 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.061 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:55.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.213 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.234 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:56.236 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.171 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:57.232 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:57.233 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:55:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:55:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:58.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:58.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:58.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:55:58.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:55:58.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:55:58.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:55:58.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:55:58.955 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:58 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:55:59.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:58 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
2026-03-10T09:55:59.302 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:58 vm03 ceph-mon[123760]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:59.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:58 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:59.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:58 vm00 ceph-mon[118593]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:55:59.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:58 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:55:59.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:58 vm00 ceph-mon[121907]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:55:59 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:00.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:00.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:00.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:00.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:00.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:00.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
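Each audit cycle maps to ordinary CLI commands that the active mgr issues while gathering cluster state; the equivalent calls can be reproduced from any host with an admin keyring (a sketch of the corresponding commands, not the mgr's internal code path):

  $ ceph config dump --format json
  $ ceph versions
  $ ceph config generate-minimal-conf
  $ ceph auth get client.admin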
2026-03-10T09:56:00.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:00.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:00.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:00.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:00.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:00.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:55:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
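The recurring "Metadata not up to date on all hosts. Skipping non agent specs" message comes from the cephadm mgr module: this job runs with mgr/cephadm/use_agent set to true, and cephadm holds off applying non-agent service specs until every host's agent has reported fresh metadata. A hedged sketch for checking agent state from the cluster (filtering ceph orch ps with grep rather than relying on a specific flag):

  $ ceph config get mgr mgr/cephadm/use_agent
  $ ceph orch ps | grep agent
  $ ceph health detail | grep -i cephadm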
2026-03-10T09:56:01.000 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:56:00] "GET /metrics HTTP/1.1" 200 37958 "" "Prometheus/2.51.0"
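The GET /metrics entry is the Prometheus server (v2.51.0, at 192.168.123.103) scraping the active mgr's prometheus module. The endpoint can be fetched by hand to confirm the exporter is healthy; the sketch below assumes the module's default port 9283 on the mgr host vm00:

  $ curl -s http://vm00:9283/metrics | head -n 20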
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
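The pgmap line is the periodic PG status digest (all 161 PGs active+clean, 160 GiB free). The same summary is available on demand:

  $ ceph pg stat
  $ ceph -s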
2026-03-10T09:56:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:01.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
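Teuthology multiplexes one journalctl stream per daemon into this log. On the test nodes themselves, the same output can be pulled for a single daemon via cephadm, which wraps journalctl for the ceph-<fsid>@<daemon> unit; a sketch using the fsid visible in the mgr unit name above:

  $ cephadm logs --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa --name mon.a -- -n 100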
Skipping non agent specs 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:01.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:01.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:02 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:02.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:02.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:02.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:02.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:02.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:02.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:02.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:02.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:02.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:02.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:02.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:02.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:03.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:03.300 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
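The repeated "Metadata not up to date on all hosts. Skipping non agent specs" messages come from the cephadm mgr module: with mgr/cephadm/use_agent enabled (as in this job's overrides), the module defers applying non-agent service specs until every host's agent has reported fresh metadata. A minimal sketch for inspecting that state from the bootstrap node, assuming a working cephadm shell; the flags are the documented orchestrator CLI options:

    # Confirm agent mode is on (set via mgr/cephadm/use_agent in this run's overrides):
    cephadm shell -- ceph config get mgr mgr/cephadm/use_agent
    # List the per-host agent daemons; each host should show one agent in state 'running':
    cephadm shell -- ceph orch ps --daemon_type agent
    # Cross-check against the hosts the agents must cover:
    cephadm shell -- ceph orch host ls

If an agent is missing or stale on any host listed by the last command, the module keeps skipping the non-agent specs exactly as logged above.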
2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:03.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:03.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
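The recurring config dump / versions / config generate-minimal-conf / auth get client.admin audit entries are the mgr module's periodic polling cycle. The same information can be pulled by hand to see what the module is working from (assuming access to an admin keyring):

    ceph config dump --format json      # full config dump, as polled above
    ceph versions                       # per-daemon version map, watched during the upgrade
    ceph config generate-minimal-conf   # the minimal ceph.conf pushed to managed hosts
    ceph auth get client.admin          # the admin keyring distributed by cephadm

Seeing this cycle repeat across mon.a, mon.b, and mon.c is expected: each quorum member records the same audit entries.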
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:03.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:03.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:03 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:03.540Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:03.540Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:03.870 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:03.541Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
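The Alertmanager failures above are a name-resolution problem rather than an Alertmanager bug: the ceph-dashboard webhook receiver points at host.containers.internal, a name that recent podman releases normally inject into a container's /etc/hosts, while the DNS server at 192.168.123.1 knows nothing about it. A quick way to confirm, sketched with a placeholder container name:

    # On vm00, the host running the alertmanager container:
    getent hosts host.containers.internal || echo "not resolvable on the host side"
    # Inside the container the name should come from /etc/hosts, not DNS
    # (<alertmanager-container> is a placeholder for the real container name):
    podman exec <alertmanager-container> cat /etc/hosts
    # The URL being notified is the dashboard's Prometheus receiver endpoint;
    # this shows the service URLs the mgr actually serves:
    cephadm shell -- ceph mgr services

If /etc/hosts inside the container lacks the entry, the lookup falls through to DNS and fails exactly as logged.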
2026-03-10T09:56:04.297 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:04 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
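The CephOSDFlapping evaluation failure is a many-to-many join: during the upgrade, ceph_osd_metadata for osd.0 exists twice with different instance labels (ceph_cluster and 192.168.123.103:9283), so the on (ceph_daemon) vector match is no longer unique on the right-hand side. One way to see a deduplicated result is to aggregate the metadata side before the join; the host and port below are assumptions based on this job's placement (prometheus.a on vm03) and cephadm's default Prometheus port:

    # max by (...) collapses the duplicate ceph_osd_metadata series to one
    # per (ceph_daemon, hostname), making the right-hand side unique again:
    curl -s 'http://vm03.local:9095/api/v1/query' \
      --data-urlencode 'query=(rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) max by (ceph_daemon, hostname) (ceph_osd_metadata)) * 60 > 1'

This is only a diagnostic sketch of the PromQL workaround; the duplicate series themselves disappear once the old scrape target is retired after the upgrade.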
2026-03-10T09:56:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:04.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:04.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:04 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:04.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:04.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:04 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:05.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:06.462 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:06.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:06.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:06.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:06.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:06.463 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:06.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:06.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:06.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:06.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:06.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:06.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:06.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:06.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:06.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:06.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:06.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:06.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:07.186 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:07.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:07.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:07.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:08.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:08.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:08.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:08.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:08.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:08.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]:
dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:08.622 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:08.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.284 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.285 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.286 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.286 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:09.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:09.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:09.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:09.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:09.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:09.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:10.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:10.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:10.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:10.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:10.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:10.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:10.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:10.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:10.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:10.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:10.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:10.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:10.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:56:10] "GET /metrics HTTP/1.1" 200 37964 "" "Prometheus/2.51.0"
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:11.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.549
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.549 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.622 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:11.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.621 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:12.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
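The mgr.25264/mgr.y entries above are the mon's audit trail of commands dispatched from the active mgr, most likely the cephadm module repeatedly re-reading the config, version, and keyring state it distributes while host metadata is stale. The same information is available on demand; a minimal sketch with the standard ceph CLI, assuming a node with a working client.admin keyring:

    ceph config dump --format json       # cluster configuration, as dumped by mgr.y above
    ceph versions                        # per-daemon version summary (mixed mid-upgrade)
    ceph config generate-minimal-conf    # minimal ceph.conf handed out to client hosts
    ceph auth get client.admin           # admin entity and caps
    ceph log last 20 info audit          # the audit channel these dispatch lines land in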
2026-03-10T09:56:12.630 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:12 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:12.630 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:12.630 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:12.631 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:12.631 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:12.631 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:13.637 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:13.637 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:13.637 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:13.637 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:13.638 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:13.638 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:13.638 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
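The pgmap line is the periodic placement-group digest: all 161 PGs are active+clean, so data health is fine while cephadm waits on host metadata. The same digest can be pulled interactively; a sketch, assuming admin access:

    ceph pg stat    # one-line PG summary, e.g. "161 pgs: 161 active+clean; ..."
    ceph -s         # full cluster status; the pgmap digest appears under "data:"
    ceph df         # used vs. available raw capacity, per pool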
2026-03-10T09:56:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:13.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:13.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
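"Metadata not up to date on all hosts. Skipping non agent specs" comes from the cephadm module running with the agent enabled (agent/on in this suite): until the agent on every host has reported fresh metadata, cephadm only reconciles agent specs and defers every other service. A few commands that would show where the agents stand during such a window; a sketch, assuming the standard orchestrator CLI:

    ceph config get mgr mgr/cephadm/use_agent   # true for this run
    ceph orch ps --daemon_type agent            # one agent per host; watch STATUS and REFRESHED
    ceph orch host ls                           # the hosts whose metadata must be current
    ceph health detail                          # shows CEPHADM_AGENT_DOWN if an agent stalls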
2026-03-10T09:56:13.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:13.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:13.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.541Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.541Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.542Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
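The alertmanager errors above are dashboard-webhook deliveries failing on DNS: host.containers.internal is a container-runtime alias that does not resolve through the VMs' resolver (192.168.123.1), so every notify retry dies the same way. This is noise for the upgrade itself, but checkable from vm00; a sketch, where 192.168.123.100 is assumed to be the active mgr host seen in the audit lines:

    getent hosts host.containers.internal                 # expect no answer on these VMs
    dig +short host.containers.internal @192.168.123.1    # the resolver named in the error
    curl -ks https://192.168.123.100:8443/api/prometheus_receiver   # dashboard receiver endpoint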
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:13.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.541Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.541Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.542Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
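The Alertmanager dispatch errors and retry warnings around this point all trace back to one thing: the ceph-dashboard webhook receiver posts to https://host.containers.internal:8443/api/prometheus_receiver, and that name is only meaningful if podman injected it into the container's /etc/hosts; when resolution falls through to the DNS server the container actually uses (192.168.123.1:53), the lookup fails exactly as logged. A minimal triage sketch from a shell on vm00, assuming the container name shown in the journal unit above and the usual cephadm bind-mount path for the Alertmanager config (both may differ on other deployments):

    # Is host.containers.internal present in the container's /etc/hosts?
    # (podman normally injects it; an empty grep means it did not)
    podman exec ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a \
        grep host.containers.internal /etc/hosts
    # Which URL is the ceph-dashboard webhook actually configured with?
    podman exec ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a \
        grep -B1 -A2 prometheus_receiver /etc/alertmanager/alertmanager.yml

Until that name resolves, or the dashboard receiver URL is changed to a reachable address, every alert notification will exhaust its retries the way the two errors above did.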
2026-03-10T09:56:13.872 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:13.542Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:14.147 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:14.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
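The CephOSDFlapping failure above is a PromQL join problem rather than an OSD problem. The rule multiplies rate(ceph_osd_up[5m]) by ceph_osd_metadata with on (ceph_daemon) group_left (hostname), which requires exactly one metadata series per ceph_daemon on the right-hand side; the two series quoted in the error differ only in their instance label ("ceph_cluster" versus "192.168.123.103:9283"), i.e. the old and the new scrape target across the mgr handover are both still live inside the five-minute window, so the join is many-to-many. A quick way to see the duplicates directly, assuming promtool is available and prometheus.a answering on vm03:9095 as in this run's placement:

    # List OSD daemons that currently have more than one metadata series;
    # each hit is the "many-to-many matching" right-hand side seen by the rule.
    promtool query instant http://vm03:9095 \
        'count by (ceph_daemon) (ceph_osd_metadata) > 1'

The condition normally clears itself: once the stale target is no longer scraped its series go stale after about five minutes and the rule evaluates cleanly again, so a transient burst of these warnings during an upgrade is expected.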
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
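"Metadata not up to date on all hosts. Skipping non agent specs" is the cephadm module declining to apply any non-agent service spec until the agent on every managed host has reported fresh metadata; this run deploys agents (agent.vm00 and agent.vm03 appear in the orch ps listing further down, with the vm03 one still starting), so the message repeats on every serve-loop pass until both have checked in. A sketch of how to watch that from an admin shell; the flags exist on current ceph orch, while the jq projection is illustrative and assumes jq is installed:

    # Confirm the agent feature is enabled and see which agent lags behind.
    ceph config get mgr mgr/cephadm/use_agent
    ceph orch ps --daemon-type agent
    # Machine-readable variant of the same question:
    ceph orch ps --daemon-type agent --format json | jq '.[] | {hostname, status_desc}'

As soon as the last agent reports, the deferred specs are applied on the next pass and the message stops.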
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
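The same four read-only mon commands (config dump, versions, config generate-minimal-conf, auth get client.admin) repeat many times per second here; this looks like the active mgr refreshing its cached cluster view once per host or daemon it is reconciling, so the flood is noisy but harmless. The equivalent queries can be issued by hand from any admin node when checking what the module sees, for example:

    # The same data the repeated audit entries correspond to:
    ceph config dump --format json-pretty | head
    ceph versions
    ceph config generate-minimal-conf
    ceph auth get client.admin

None of these mutate state, which is consistent with every one of them being logged as a plain dispatch with no follow-up error.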
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity":
"client.admin"}]: dispatch 2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.551 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.829 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:14.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:14.831 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:14.831 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:14.831 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:14.831 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 19s ago 9m - - 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 starting - - - - 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (3m) 19s ago 7m 26.6M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (3m) 55s ago 7m 47.0M - dad864ee21e9 011f2081bf92 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (2m) 19s ago 7m 52.4M - 3.5 e1d6a67b021e a19997a050c6 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (68s) 55s ago 9m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (3m) 19s ago 10m 554M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (63s) 19s ago 10m 54.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 starting - - - 2048M 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (44s) 19s ago 9m 43.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (3m) 19s ago 7m 9617k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (3m) 55s ago 7m 9927k - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (9m) 19s ago 9m 57.7M 4096M 17.2.0 e1d6a67b021e a78d5e95cfdb 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (9m) 19s ago 9m 54.5M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6 2026-03-10T09:56:15.334 
INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (8m) 19s ago 8m 50.5M 4096M 17.2.0 e1d6a67b021e dc86a99a0403 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (8m) 19s ago 8m 51.7M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (8m) 55s ago 8m 54.6M 4096M 17.2.0 e1d6a67b021e 76735d749d5c 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (8m) 55s ago 8m 53.6M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (8m) 55s ago 8m 51.5M 4096M 17.2.0 e1d6a67b021e d485462ed497 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (8m) 55s ago 8m 51.7M 4096M 17.2.0 e1d6a67b021e 9271ca589720 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (2m) 55s ago 7m 43.9M - 2.51.0 1d3b7f56885b fbf1a95f0e55 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (7m) 19s ago 7m 94.0M - 17.2.0 e1d6a67b021e a1037186db7f 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (7m) 55s ago 7m 94.9M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (7m) 19s ago 7m 94.9M - 17.2.0 e1d6a67b021e 3ac258c6b805 2026-03-10T09:56:15.334 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (7m) 55s ago 7m 92.0M - 17.2.0 e1d6a67b021e 5cd6eb4d6619 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
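This ceph orch ps snapshot catches the upgrade mid-flight: both mgrs and all three mons already report 19.2.3-678-ge911bdeb, the OSD, RGW and iSCSI containers are still on the 17.2.0 starting image, and agent.vm03 plus mon.b sit in "starting" with no version yet. A caller that needs the table to settle can poll instead of sleeping; a rough sketch, assuming status_desc in the JSON output carries plain states such as "running" or "starting" and that jq is available on the admin host:

    # Illustrative wait loop: block until every daemon reports "running".
    while ceph orch ps --format json \
            | jq -e 'map(select(.status_desc != "running")) | length > 0' >/dev/null; do
        echo "daemons still settling; retrying in 10s"
        sleep 10
    done

The same JSON exposes each daemon's version field, which is what an upgrade test can assert against the target build once the loop exits.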
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:15.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:15.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:15.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[121907]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:15.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:15.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:15.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:15.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
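The recurring "Metadata not up to date on all hosts. Skipping non agent specs" message comes from the cephadm mgr module: when the per-host agent is in use, the serve loop defers applying non-agent service specs until every host's agent has reported fresh metadata, so during a mon/mgr restart cycle these lines are expected churn rather than a failure. A minimal way to check that the agents are up and reporting, sketched in Python (a configured `ceph` CLI is assumed, and the JSON field names follow the orchestrator's daemon dump but may vary by release):

```python
import json
import subprocess

def agent_status():
    """List cephadm agent daemons via `ceph orch ps` in JSON form."""
    out = subprocess.check_output(
        ["ceph", "orch", "ps", "--daemon_type", "agent", "--format", "json"]
    )
    return json.loads(out)

for d in agent_status():
    # status_desc reads "running" once the agent reports back to the mgr.
    print(d.get("hostname"), d.get("daemon_id"), d.get("status_desc"))
```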
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    "mon": {
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    "mgr": {
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    "osd": {
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    "rgw": {
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    "overall": {
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 12,
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:    }
2026-03-10T09:56:15.643 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:56:15.709 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:15 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:15.710 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:15 vm03 ceph-mon[123760]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
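The `ceph versions` dump above captures the cluster mid-upgrade: the 3 mons and 2 mgrs are already on the squid target build, while the 8 OSDs and 4 RGWs still run 17.2.0 quincy. Progress can be asserted directly off this JSON; a minimal sketch, assuming the shape shown above:

```python
import json
import subprocess

# `ceph versions` prints JSON: per-daemon-type version counts plus "overall".
v = json.loads(subprocess.check_output(["ceph", "versions"]))

for section, counts in v.items():
    print(section, counts)

# Two entries under "overall" means old and new releases still coexist;
# the upgrade is finished once only the target version remains.
if len(v["overall"]) > 1:
    print("upgrade still in progress")
```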
2026-03-10T09:56:15.710 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:15.710 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:15.710 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:15.710 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "in_progress": true,
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "services_complete": [
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:        "mgr"
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    ],
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "progress": "4/23 daemons upgraded",
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "message": "Currently upgrading mon daemons",
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:    "is_paused": false
2026-03-10T09:56:15.967 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:56:16.302 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK
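The `orch upgrade status` JSON above is what gets polled between steps: `services_complete` lists daemon types fully moved to `target_image`, `progress` counts redeployed daemons (4/23 at this point), and `message` names the type currently in flight (the mons). A polling sketch, assuming this JSON shape; the interval and timeout are arbitrary illustrative choices:

```python
import json
import subprocess
import time

def upgrade_status():
    """Fetch the orchestrator's upgrade state as a dict."""
    out = subprocess.check_output(["ceph", "orch", "upgrade", "status"])
    return json.loads(out)

deadline = time.time() + 3600  # illustrative timeout
while time.time() < deadline:
    st = upgrade_status()
    print(st.get("progress"), "-", st.get("message"))
    if not st.get("in_progress"):
        break  # upgrade finished (or none is running)
    time.sleep(30)
```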
2026-03-10T09:56:16.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:16.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='client.44131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='client.54110 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='client.44143 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3980120208' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/2685618740' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='client.44131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='client.54110 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='client.44143 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3980120208' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:16.561 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:16.562 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:16 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2685618740' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:56:16.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='client.44131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='client.54110 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='client.44143 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3980120208' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:16 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2685618740' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
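The four audit entries that repeat from `mgr.y` on each pass (`config dump`, `versions`, `config generate-minimal-conf`, `auth get client.admin`) are the cephadm serve loop refreshing the material it distributes to managed hosts, essentially a minimal ceph.conf plus the admin keyring. The same minimal conf can be produced by hand; a sketch, again assuming a configured `ceph` CLI:

```python
import subprocess

# `ceph config generate-minimal-conf` emits a minimal ceph.conf (fsid and
# mon_host), roughly the config cephadm pushes to its managed hosts.
conf = subprocess.check_output(["ceph", "config", "generate-minimal-conf"])
print(conf.decode())
```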
2026-03-10T09:56:17.094 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:17.407 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='client.34176 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
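The CephNodeDiskspaceWarning failure above is a PromQL many-to-many error rather than a Ceph problem: `node_uname_info` exists twice for `instance="vm03"`, once with a `cluster` label and once without, so the `on (instance) group_left (nodename)` join has two right-hand candidates. The duplicate series can be confirmed through the Prometheus series API; a sketch assuming the cephadm-deployed Prometheus is reachable on vm03 at its usual port 9095 (host and port are assumptions here):

```python
import json
from collections import Counter
from urllib.request import urlopen

PROM = "http://vm03:9095"  # assumed endpoint for the deployed Prometheus

# List all node_uname_info series currently known to Prometheus.
with urlopen(f"{PROM}/api/v1/series?match[]=node_uname_info") as r:
    series = json.load(r)["data"]

# More than one series per instance makes the alert rule's join ambiguous.
counts = Counter(s.get("instance") for s in series)
for inst, n in counts.items():
    if n > 1:
        print(f"{inst}: {n} node_uname_info series (duplicate labels)")
```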
Skipping non agent specs 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 
ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:17.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: Metadata not up to date on all 
[... "Metadata not up to date on all hosts. Skipping non agent specs" repeated several more times on mon.a ...]
2026-03-10T09:56:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: from='client.34176 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:17.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same mgr.y dispatch cycle, interleaved with further "Metadata not up to date" lines and bare entity='mgr.y' audit lines, repeats several more times on mon.a at 09:56:17.870-871; duplicates collapsed ...]
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
[... "Metadata not up to date on all hosts. Skipping non agent specs" repeated several more times on mon.c ...]
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: from='client.34176 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:17.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:17.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same mgr.y dispatch cycle, interleaved with further "Metadata not up to date" lines and bare entity='mgr.y' audit lines, repeats several more times on mon.c at 09:56:17.871-872; duplicates collapsed ...]
2026-03-10T09:56:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:18 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
[... "Metadata not up to date on all hosts. Skipping non agent specs" repeated several more times on mon.b ...]
2026-03-10T09:56:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:18.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same mgr.y dispatch cycle, interleaved with further "Metadata not up to date" lines and bare entity='mgr.y' audit lines, repeats many more times on mon.b through 09:56:18.801; duplicates collapsed ...]
2026-03-10T09:56:18.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
[... "Metadata not up to date on all hosts. Skipping non agent specs" repeated several more times on mon.a ...]
2026-03-10T09:56:18.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:18.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:18.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:18.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
[... the same mgr.y dispatch cycle, interleaved with further "Metadata not up to date" lines and bare entity='mgr.y' audit lines, repeats many more times on mon.a through 09:56:18.871; duplicates collapsed ...]
2026-03-10T09:56:18.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:18.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:18 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
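The cmd=[...] lines above are mon audit entries for the mgr's refresh cycle: mgr.y polls the cluster config, the daemon versions, a minimal conf, and the client.admin key on every pass, so each mon logs the same four-command sequence many times per second. Each audit entry corresponds to an ordinary mon command; for reference, the rough CLI equivalents (read-only, illustrative):

    ceph config dump --format json       # cmd=[{"prefix": "config dump", "format": "json"}]
    ceph versions                        # cmd=[{"prefix": "versions"}]
    ceph config generate-minimal-conf    # cmd=[{"prefix": "config generate-minimal-conf"}]
    ceph auth get client.admin           # cmd=[{"prefix": "auth get", "entity": "client.admin"}]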
2026-03-10T09:56:19.683 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:19.683 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:56:19.683 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:19.683 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:19.683 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:19.683 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:19.684 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:19.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:19.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:56:19.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:19.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:19.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:19.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:19.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:19.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:19.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:56:19.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:19.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:19.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:19.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:19.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
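The recurring "Metadata not up to date on all hosts. Skipping non agent specs" message is the cephadm serve loop running in agent mode: until every host's agent has reported fresh metadata, cephadm holds off on applying the non-agent service specs and logs this on each pass. It normally clears on its own once the agents check in; if it persists, the agent daemons themselves are the first thing to inspect, e.g. (illustrative; exact flag spelling varies by release):

    ceph orch ps --daemon-type agent
    ceph health detail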
2026-03-10T09:56:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:20 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:20.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:20.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
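At this density the journal mirror reads better as message counts than as raw lines. A minimal sketch for summarizing it, assuming the captured output has been saved with one journal entry per line to a file named teuthology.log (hypothetical name):

    # count distinct ceph-mon messages, most frequent first
    grep 'journalctl@ceph.mon' teuthology.log \
      | sed -E 's/^.*ceph-mon\[[0-9]+\]: //' \
      | sort | uniq -c | sort -rn | head

The mgr dispatch cycle and the metadata warning dominate the counts, which makes it easy to see that the cluster is idling in a refresh loop rather than erroring.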
Skipping non agent specs 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
"client.admin"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:20.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:20.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
"client.admin"}]: dispatch 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:20.872 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:56:20] "GET /metrics HTTP/1.1" 200 37963 "" "Prometheus/2.51.0" 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:21.422 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:21.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:21.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:21.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:21.774 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:21.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:21.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:21.775 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
Skipping non agent specs 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.776 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:21.798 
2026-03-10T09:56:21.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:21.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:21.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:21.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:21.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:22.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
"client.admin"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:22.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
"client.admin"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:22.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
"client.admin"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:22.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:23.556 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:23.541Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:23.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:56:23.542Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:23.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:23.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:24.274 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:24.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:24.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:24 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:24.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:24 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:24.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:24 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:24 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:24 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:24 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:24 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:24 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:24 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:25.439 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:25 vm03 ceph-mon[123760]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:25.440 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.440 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.440 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.440 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[118593]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[121907]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:25.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:27.297 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:27.703 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T09:56:27.703 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:27.703 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:27.703 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 973 B/s rd, 0 op/s
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all mon
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch
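Both "Evaluating rule failed ... many-to-many matching not allowed" warnings above share one cause: after the mgr was redeployed on the new image, Prometheus briefly holds two copies of each metadata series (one carrying the new cluster label, one without it), so the on(...) group_left(...) joins in those alert rules no longer see a unique right-hand side. A minimal spot-check, assuming the cephadm-deployed Prometheus on vm03 listens on its usual port 9095 (an assumption, not taken from this log):

    # Hedged sketch: count series per join key; any result > 1 reproduces the
    # "matching labels must be unique on one side" error reported above.
    curl -sG 'http://vm03:9095/api/v1/query' \
      --data-urlencode 'query=count by (ceph_daemon) (ceph_osd_metadata) > 1'
    curl -sG 'http://vm03:9095/api/v1/query' \
      --data-urlencode 'query=count by (instance) (node_uname_info) > 1'

The duplicates age out once the pre-upgrade series go stale, so these warnings are usually transient during an upgrade.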
"name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all crash 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 973 B/s rd, 0 op/s 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all mon 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all crash 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.704 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:27.705 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.705 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:56:27.705 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 973 B/s rd, 0 op/s 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all mon 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 
ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all crash 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:56:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:28.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:27 vm00 systemd[1]: Stopping Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:56:28.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[60002]: 2026-03-10T09:56:27.805+0000 7fb15e4d0700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:56:28.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[60002]: 2026-03-10T09:56:27.806+0000 7fb15e4d0700 -1 osd.0 96 *** Got signal Terminated *** 2026-03-10T09:56:28.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[60002]: 2026-03-10T09:56:27.806+0000 7fb15e4d0700 -1 osd.0 96 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T09:56:28.790 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
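The "osd ok-to-stop" dispatches above are cephadm's safety gate: before restarting osd.0 on the new image it asks the mons whether stopping it would leave any PGs unavailable. The same check can be run by hand from an admin host; a sketch (the --max value mirrors the {"max": 16} argument in the dispatched command, and the flag spelling is an assumption inferred from that JSON, not something shown in this log):

    # Ask the cluster whether osd.0 (and up to 16 peers) can stop safely.
    ceph osd ok-to-stop 0 --max 16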
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:28.790 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[118593]: Upgrade: osd.0 is safe to restart 2026-03-10T09:56:28.790 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[118593]: Upgrade: Updating osd.0 2026-03-10T09:56:28.790 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[118593]: Deploying daemon osd.0 on vm00 2026-03-10T09:56:28.790 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[118593]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:56:28.790 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[118593]: osd.0 marked itself down and dead 2026-03-10T09:56:28.790 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133340]: 2026-03-10 09:56:28.593034623 +0000 UTC m=+0.798319384 container died a78d5e95cfdb21c7417afce8840b334edef0263e6dde8f95e04723814b10ff54 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , release=754, CEPH_POINT_RELEASE=-17.2.0, io.k8s.display-name=CentOS Stream 8, distribution-scope=public, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, ceph=True, version=8, GIT_BRANCH=HEAD, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, RELEASE=HEAD, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc.) 2026-03-10T09:56:28.790 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133340]: 2026-03-10 09:56:28.630051614 +0000 UTC m=+0.835336375 container remove a78d5e95cfdb21c7417afce8840b334edef0263e6dde8f95e04723814b10ff54 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, vendor=Red Hat, Inc., com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, io.openshift.tags=base centos centos-stream, distribution-scope=public, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, RELEASE=HEAD, architecture=x86_64, vcs-type=git, GIT_BRANCH=HEAD, ceph=True, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, io.openshift.expose-services=, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-10T09:56:28.790 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 bash[133340]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0 2026-03-10T09:56:28.790 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.763117978 +0000 UTC m=+0.015105064 container create dc549a16b4bb78a977a4a8a7be8ab48807cabb0a95506022bc59d3e81e8cd5c3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:56:28.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:28.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[121907]: Upgrade: osd.0 is safe to restart 2026-03-10T09:56:28.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[121907]: Upgrade: Updating osd.0 2026-03-10T09:56:28.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[121907]: Deploying daemon osd.0 on vm00 2026-03-10T09:56:28.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[121907]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:56:28.792 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:28 vm00 ceph-mon[121907]: osd.0 marked itself down and dead 2026-03-10T09:56:28.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:28 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:28.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:28 vm03 ceph-mon[123760]: Upgrade: osd.0 is safe to restart 2026-03-10T09:56:28.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:28 vm03 ceph-mon[123760]: Upgrade: Updating osd.0 2026-03-10T09:56:28.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:28 vm03 ceph-mon[123760]: Deploying daemon osd.0 on vm00 2026-03-10T09:56:28.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:28 vm03 ceph-mon[123760]: Health check failed: 1 Cephadm Agent(s) are not reporting. Hosts may be offline (CEPHADM_AGENT_DOWN) 2026-03-10T09:56:28.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:28 vm03 ceph-mon[123760]: osd.0 marked itself down and dead 2026-03-10T09:56:29.077 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.803638448 +0000 UTC m=+0.055625544 container init dc549a16b4bb78a977a4a8a7be8ab48807cabb0a95506022bc59d3e81e8cd5c3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.806399899 +0000 UTC m=+0.058386985 container start dc549a16b4bb78a977a4a8a7be8ab48807cabb0a95506022bc59d3e81e8cd5c3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.80987205 +0000 UTC m=+0.061859136 container attach dc549a16b4bb78a977a4a8a7be8ab48807cabb0a95506022bc59d3e81e8cd5c3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS) 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.756875299 +0000 UTC m=+0.008862385 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.93746802 +0000 UTC m=+0.189455117 container died dc549a16b4bb78a977a4a8a7be8ab48807cabb0a95506022bc59d3e81e8cd5c3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 podman[133407]: 2026-03-10 09:56:28.956498453 +0000 UTC m=+0.208485529 container remove dc549a16b4bb78a977a4a8a7be8ab48807cabb0a95506022bc59d3e81e8cd5c3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, io.buildah.version=1.41.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service: Deactivated successfully. 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service: Unit process 133417 (conmon) remains running after unit stopped. 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service: Unit process 133425 (podman) remains running after unit stopped. 2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 systemd[1]: Stopped Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
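cephadm wraps each containerized daemon in a templated systemd unit named ceph-<fsid>@<daemon>.service, which is why the stop above is reported by systemd rather than by Ceph itself. To inspect the same unit on vm00 (a hedged illustration: the unit name is verbatim from the log, but the commands are not part of this job):

    systemctl status 'ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service'
    journalctl -u 'ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service' -n 50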
2026-03-10T09:56:29.078 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:28 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service: Consumed 8.915s CPU time, 130.4M memory peak.
2026-03-10T09:56:29.330 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 systemd[1]: Starting Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:56:29.330 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 podman[133507]: 2026-03-10 09:56:29.238683316 +0000 UTC m=+0.018812715 container create 15bd7df0deeaaecc1935e61429cd405955bdbace16d361a5d480248db98ed3bc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
2026-03-10T09:56:29.330 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 podman[133507]: 2026-03-10 09:56:29.279325134 +0000 UTC m=+0.059454533 container init 15bd7df0deeaaecc1935e61429cd405955bdbace16d361a5d480248db98ed3bc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS)
2026-03-10T09:56:29.330 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 podman[133507]: 2026-03-10 09:56:29.282722816 +0000 UTC m=+0.062852215 container start 15bd7df0deeaaecc1935e61429cd405955bdbace16d361a5d480248db98ed3bc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
2026-03-10T09:56:29.330 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 podman[133507]: 2026-03-10 09:56:29.283557489 +0000 UTC m=+0.063686888 container attach 15bd7df0deeaaecc1935e61429cd405955bdbace16d361a5d480248db98ed3bc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223)
2026-03-10T09:56:29.330 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 podman[133507]: 2026-03-10 09:56:29.231404298 +0000 UTC m=+0.011533697 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:56:29.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:29 vm00 ceph-mon[118593]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T09:56:29.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:29 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:56:29.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:29 vm00 ceph-mon[118593]: osdmap e97: 8 total, 7 up, 8 in
2026-03-10T09:56:29.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:29 vm00 ceph-mon[121907]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T09:56:29.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:29 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:56:29.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:29 vm00 ceph-mon[121907]: osdmap e97: 8 total, 7 up, 8 in
2026-03-10T09:56:29.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:29.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:29.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:29.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:29.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:29 vm03 ceph-mon[123760]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T09:56:29.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:29 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:56:29.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:29 vm03 ceph-mon[123760]: osdmap e97: 8 total, 7 up, 8 in
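The OSD_DOWN health warning here is the expected dip while osd.0 is redeployed on the new image. While such a window is open, the rolling upgrade can be followed from any admin host; a sketch using standard cephadm commands (not taken from this job's task list):

    ceph orch upgrade status   # target image and daemons still to upgrade
    ceph -s                    # health (transient OSD_DOWN) and progress events
    ceph -W cephadm            # stream the cephadm cluster-log channel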
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:56:30.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
2026-03-10T09:56:30.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
2026-03-10T09:56:30.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
2026-03-10T09:56:30.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:29 vm00 bash[133507]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
2026-03-10T09:56:30.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/ln -snf /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 /var/lib/ceph/osd/ceph-0/block
2026-03-10T09:56:30.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 bash[133507]: Running command: /usr/bin/ln -snf /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 /var/lib/ceph/osd/ceph-0/block
2026-03-10T09:56:30.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
2026-03-10T09:56:30.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 bash[133507]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
2026-03-10T09:56:30.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0
2026-03-10T09:56:30.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 bash[133507]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0
2026-03-10T09:56:30.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
2026-03-10T09:56:30.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 bash[133507]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
2026-03-10T09:56:30.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[133517]: --> ceph-volume lvm activate successful for osd ID: 0
2026-03-10T09:56:30.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 bash[133507]: --> ceph-volume lvm activate successful for osd ID: 0
2026-03-10T09:56:30.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:30 vm00 podman[133507]: 2026-03-10 09:56:30.242764571 +0000 UTC m=+1.022893959 container died 15bd7df0deeaaecc1935e61429cd405955bdbace16d361a5d480248db98ed3bc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team )
2026-03-10T09:56:31.049 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:30 vm00 ceph-mon[118593]: osdmap e98: 8 total, 7 up, 8 in
2026-03-10T09:56:31.050 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:56:30] "GET /metrics HTTP/1.1" 200 37963 "" "Prometheus/2.51.0"
2026-03-10T09:56:31.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:30 vm03 ceph-mon[123760]: osdmap e98: 8 total, 7 up, 8 in
2026-03-10T09:56:31.320 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[121907]: osdmap e98: 8 total, 7 up, 8 in
2026-03-10T09:56:31.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 podman[133507]: 2026-03-10 09:56:31.320694574 +0000 UTC m=+2.100823964 container remove 15bd7df0deeaaecc1935e61429cd405955bdbace16d361a5d480248db98ed3bc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True)
2026-03-10T09:56:31.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 podman[133766]: 2026-03-10 09:56:31.397609466 +0000 UTC m=+0.010453298 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
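The one-shot osd-0-activate container above first tries raw-mode activation (hence the harmless "Failed to activate via raw" lines), then succeeds via LVM using the OSD fsid embedded in the LV name. On the host this amounts to roughly the following sketch (the id and fsid are taken from the log; --no-systemd matches cephadm managing the systemd unit itself):

    # Re-prime /var/lib/ceph/osd/ceph-0 from the LV without starting a systemd unit.
    ceph-volume lvm activate --no-systemd 0 74379a85-5860-4155-922a-ce2adedc2262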
2026-03-10T09:56:31.619 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 podman[133766]: 2026-03-10 09:56:31.509662459 +0000 UTC m=+0.122506291 container create 965a991f28cff104038f4ee54ea699ac0ebbfa6af6cf94c7103ebec993d9dcdf (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, org.label-schema.license=GPLv2)
2026-03-10T09:56:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[118593]: pgmap v44: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s
2026-03-10T09:56:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:32.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:32.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[121907]: pgmap v44: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s
2026-03-10T09:56:32.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:32.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:32.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 podman[133766]: 2026-03-10 09:56:31.72808667 +0000 UTC m=+0.340930511 container init 965a991f28cff104038f4ee54ea699ac0ebbfa6af6cf94c7103ebec993d9dcdf (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:56:32.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 podman[133766]: 2026-03-10 09:56:31.73059231 +0000 UTC m=+0.343436142 container start 965a991f28cff104038f4ee54ea699ac0ebbfa6af6cf94c7103ebec993d9dcdf (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2)
2026-03-10T09:56:32.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 ceph-osd[133782]: -- 192.168.123.100:0/884965829 <== mon.1 v2:192.168.123.100:3301/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55d3d6e56960 con 0x55d3d7c40400
2026-03-10T09:56:32.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 bash[133766]: 965a991f28cff104038f4ee54ea699ac0ebbfa6af6cf94c7103ebec993d9dcdf
2026-03-10T09:56:32.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:31 vm00 systemd[1]: Started Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:56:32.240 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:31 vm03 ceph-mon[123760]: pgmap v44: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s
2026-03-10T09:56:32.240 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:32.240 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:32.869 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:32 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:32.579+0000 7fdc9e753740 -1 Falling back to public interface
2026-03-10T09:56:33.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: pgmap v45: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
"client.admin"}]: dispatch 2026-03-10T09:56:33.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: pgmap v45: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: pgmap v45: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:33.759 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:33.455+0000 7fdc9e753740 -1 osd.0 0 read_superblock omap replica is missing. 2026-03-10T09:56:33.759 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:33 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:33.492+0000 7fdc9e753740 -1 osd.0 96 log_to_monitors true 2026-03-10T09:56:34.547 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v46: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s 2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v47: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v48: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v49: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v50: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v51: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.842 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v52: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v53: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v54: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v55: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v56: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: pgmap v57: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.843 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v46: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v47: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v48: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v49: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v50: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v51: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v52: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v53: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v54: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v55: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v56: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: pgmap v57: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v46: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 785 B/s rd, 0 op/s
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v47: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v48: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v49: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v50: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v51: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v52: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v53: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v54: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v55: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v56: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: pgmap v57: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:34.873 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:34.873 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:34 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:34.761+0000 7fdc964fe640 -1 osd.0 96 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:56:35.856 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v58: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v59: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v60: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v61: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v62: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v63: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded (PG_DEGRADED)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v64: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: pgmap v65: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: osdmap e99: 8 total, 7 up, 8 in
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v58: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v59: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.857 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v60: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v61: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v62: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v63: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded (PG_DEGRADED) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v64: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: pgmap v65: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T09:56:35.858 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: osdmap e99: 8 total, 7 up, 8 in 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:35.858 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v58: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v59: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v60: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v61: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v62: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v63: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded (PG_DEGRADED) 2026-03-10T09:56:36.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v64: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: pgmap v65: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: osdmap e99: 8 total, 7 up, 8 in 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: pgmap v67: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: pgmap v67: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: pgmap v68: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: pgmap v69: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: pgmap v70: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: pgmap v71: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: pgmap v68: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: pgmap v69: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: pgmap v70: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: pgmap v71: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362] boot 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: osdmap e100: 8 total, 8 up, 8 in 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362] boot 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: osdmap e100: 8 total, 8 up, 8 in 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 
10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: pgmap v67: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: pgmap v68: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: pgmap v69: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: pgmap v70: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: pgmap v71: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: osd.0 [v2:192.168.123.100:6802/2188414362,v1:192.168.123.100:6803/2188414362] boot 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: osdmap e100: 8 total, 8 up, 8 in 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:36.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:37.297 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:36 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v72: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v73: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v75: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v76: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v77: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v78: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v79: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: pgmap v80: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: osdmap e101: 8 total, 8 up, 8 in
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v72: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v73: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v75: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v76: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v77: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:38.327 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v78: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v79: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: pgmap v80: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: osdmap e101: 8 total, 8 up, 8 in 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:38.328 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:38 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:38.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v72: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v73: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v75: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v76: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v77: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v78: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v79: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: pgmap v80: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: osdmap e101: 8 total, 8 up, 8 in 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:38.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:38 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: pgmap v82: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: pgmap v83: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: pgmap v84: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: pgmap v85: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: pgmap v86: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.172 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: Upgrade: osd.0 is safe to restart 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: pgmap v82: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: pgmap v83: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: pgmap v84: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: pgmap v85: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: pgmap v86: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 
ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: Upgrade: osd.0 is safe to restart 2026-03-10T09:56:39.173 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: pgmap v82: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: pgmap v83: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: pgmap v84: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: pgmap v85: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: pgmap v86: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%) 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: Upgrade: osd.0 is safe to restart 2026-03-10T09:56:39.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[118593]: Upgrade: Updating osd.0 2026-03-10T09:56:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:56:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:40.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[118593]: Deploying daemon osd.0 on vm00 2026-03-10T09:56:40.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[121907]: Upgrade: Updating osd.0 2026-03-10T09:56:40.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:56:40.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:40.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:40 vm00 ceph-mon[121907]: Deploying daemon osd.0 on vm00 2026-03-10T09:56:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:40 vm03 ceph-mon[123760]: Upgrade: Updating osd.0 2026-03-10T09:56:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T09:56:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:40 vm03 ceph-mon[123760]: Deploying daemon osd.0 on vm00 2026-03-10T09:56:40.939 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:56:40] "GET /metrics HTTP/1.1" 200 38023 "" "Prometheus/2.51.0" 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded) 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
09:56:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded) 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.620 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:41 vm00 systemd[1]: Stopping Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
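
The records above show cephadm's per-daemon safety gate. Before restarting osd.0, mgr.y dispatches "osd ok-to-stop" with ids ["0"] and max 16, each mon logs "Upgrade: osd.0 is safe to restart", and only then does the upgrade proceed to "Updating osd.0", the redeploy, and the systemd stop. A minimal manual equivalent, assuming only the stock ceph CLI (the OSD id and the --max value are copied from the dispatch records above):

    # Ask whether stopping osd.0 would leave any PG without enough replicas;
    # this is the same check the mgr issues before each OSD restart.
    ceph osd ok-to-stop 0 --max 16

    # Follow the rolling upgrade and the transient health warnings
    # (OSD_DOWN, PG_AVAILABILITY) that accompany each restart.
    ceph orch upgrade status
    ceph -s
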
2026-03-10T09:56:41.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:56:41.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded) 2026-03-10T09:56:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:42.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:41.655+0000 7fdc9b6e8640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:56:42.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:41.655+0000 7fdc9b6e8640 -1 osd.0 101 *** Got signal Terminated *** 2026-03-10T09:56:42.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[133778]: 2026-03-10T09:56:41.655+0000 7fdc9b6e8640 -1 osd.0 101 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T09:56:42.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:42 vm03 ceph-mon[123760]: osd.0 marked itself down and dead 2026-03-10T09:56:42.809 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:42 vm00 ceph-mon[118593]: osd.0 marked itself down and dead 2026-03-10T09:56:42.809 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:42 vm00 ceph-mon[121907]: osd.0 marked itself down and dead 2026-03-10T09:56:42.810 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136045]: 2026-03-10 09:56:42.555777522 +0000 UTC m=+0.910830315 container died 965a991f28cff104038f4ee54ea699ac0ebbfa6af6cf94c7103ebec993d9dcdf (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True) 2026-03-10T09:56:42.810 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136045]: 2026-03-10 09:56:42.587292052 +0000 UTC m=+0.942344834 container remove 965a991f28cff104038f4ee54ea699ac0ebbfa6af6cf94c7103ebec993d9dcdf (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-10T09:56:42.810 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 bash[136045]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0 2026-03-10T09:56:42.810 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136112]: 2026-03-10 09:56:42.718956825 +0000 UTC m=+0.017889738 container create d82c10fbe7cc26b202fc905579d6afd02907c5698be74f5f89086c1c02bc483d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:56:42.811 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136112]: 2026-03-10 09:56:42.757464274 +0000 UTC m=+0.056397197 container init d82c10fbe7cc26b202fc905579d6afd02907c5698be74f5f89086c1c02bc483d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True) 2026-03-10T09:56:42.811 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136112]: 2026-03-10 09:56:42.760625321 +0000 UTC m=+0.059558223 container start d82c10fbe7cc26b202fc905579d6afd02907c5698be74f5f89086c1c02bc483d 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-10T09:56:42.811 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136112]: 2026-03-10 09:56:42.761609352 +0000 UTC m=+0.060542265 container attach d82c10fbe7cc26b202fc905579d6afd02907c5698be74f5f89086c1c02bc483d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-10T09:56:42.811 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136112]: 2026-03-10 09:56:42.710900434 +0000 UTC m=+0.009833358 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:56:43.112 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136131]: 2026-03-10 09:56:42.905545715 +0000 UTC m=+0.010748380 container died d82c10fbe7cc26b202fc905579d6afd02907c5698be74f5f89086c1c02bc483d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0) 2026-03-10T09:56:43.112 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 podman[136131]: 2026-03-10 09:56:42.921286058 +0000 UTC m=+0.026488732 container remove d82c10fbe7cc26b202fc905579d6afd02907c5698be74f5f89086c1c02bc483d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-10T09:56:43.112 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service: Deactivated successfully. 2026-03-10T09:56:43.112 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:42 vm00 systemd[1]: Stopped Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 systemd[1]: Starting Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 podman[136217]: 2026-03-10 09:56:43.199479417 +0000 UTC m=+0.017251623 container create 895ae8eaba117916d49cf88e184552adf3d54ac6e3be117651f076dd98464968 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223) 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 podman[136217]: 2026-03-10 09:56:43.2378662 +0000 UTC m=+0.055638417 container init 895ae8eaba117916d49cf88e184552adf3d54ac6e3be117651f076dd98464968 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 podman[136217]: 2026-03-10 09:56:43.240596791 +0000 UTC m=+0.058368998 container start 895ae8eaba117916d49cf88e184552adf3d54ac6e3be117651f076dd98464968 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 podman[136217]: 2026-03-10 09:56:43.241860066 +0000 UTC m=+0.059632262 container attach 895ae8eaba117916d49cf88e184552adf3d54ac6e3be117651f076dd98464968 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 podman[136217]: 2026-03-10 09:56:43.191342456 +0000 UTC m=+0.009114672 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:43.369 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:43.720 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:43 vm03 ceph-mon[123760]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:56:43.720 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:43 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:56:43.720 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:43 vm03 ceph-mon[123760]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T09:56:43.832 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:43 vm00 ceph-mon[118593]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:56:43.833 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:43 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:56:43.833 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:43 vm00 ceph-mon[118593]: osdmap 
e102: 8 total, 7 up, 8 in 2026-03-10T09:56:43.833 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:43 vm00 ceph-mon[121907]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:56:43.833 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:43 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:56:43.833 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:43 vm00 ceph-mon[121907]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T09:56:44.119 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-10T09:56:44.120 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:43 vm00 bash[136217]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/ln -snf /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 /var/lib/ceph/osd/ceph-0/block 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 bash[136217]: Running command: /usr/bin/ln -snf /dev/ceph-f91417ba-0e37-4e02-ae90-cadd691e5d10/osd-block-74379a85-5860-4155-922a-ce2adedc2262 /var/lib/ceph/osd/ceph-0/block 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-10T09:56:44.506 
INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 bash[136217]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 bash[136217]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 bash[136217]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate[136228]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 bash[136217]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 podman[136217]: 2026-03-10 09:56:44.229103591 +0000 UTC m=+1.046875797 container died 895ae8eaba117916d49cf88e184552adf3d54ac6e3be117651f076dd98464968 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 podman[136217]: 2026-03-10 09:56:44.251883501 +0000 UTC m=+1.069655707 container remove 895ae8eaba117916d49cf88e184552adf3d54ac6e3be117651f076dd98464968 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-activate, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 podman[136479]: 2026-03-10 09:56:44.343346443 +0000 UTC m=+0.016570467 container create 6eea3ec528dbcaeb5c48e048d70ed59c92090930ab2fe3593f873227c43de518 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 podman[136479]: 2026-03-10 09:56:44.382238942 +0000 UTC m=+0.055462966 container init 6eea3ec528dbcaeb5c48e048d70ed59c92090930ab2fe3593f873227c43de518 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 podman[136479]: 2026-03-10 09:56:44.385296145 +0000 UTC m=+0.058520169 container start 6eea3ec528dbcaeb5c48e048d70ed59c92090930ab2fe3593f873227c43de518 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 bash[136479]: 6eea3ec528dbcaeb5c48e048d70ed59c92090930ab2fe3593f873227c43de518 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 podman[136479]: 2026-03-10 09:56:44.33631948 +0000 UTC m=+0.009543513 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:56:44.506 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 systemd[1]: Started Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
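
The redeploy of osd.0 is complete at this point: the old container died and was removed, a short-lived osd-0-deactivate container ran, and the osd-0-activate container re-primed /var/lib/ceph/osd/ceph-0 via ceph-bluestore-tool before systemd started the new unit. The earlier "Failed to activate via raw" message is non-fatal here: ceph-volume probes for raw-mode OSDs first, finds none, and the subsequent "ceph-volume lvm activate successful for osd ID: 0" confirms the LVM path succeeded. A couple of follow-up checks, sketched under the assumption that the stock cephadm and ceph CLIs are on the host (the fsid in the unit name is copied from the record above):

    # Confirm the restarted unit and inspect the LVM backing of the OSDs.
    systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service
    cephadm ceph-volume lvm list

    # Versions should now be mixed: osd.0 reports the target e911bdeb...
    # squid build, while not-yet-upgraded daemons still report v17.2.0.
    cephadm shell -- ceph versions
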
2026-03-10T09:56:44.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:44.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T09:56:44.548 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:44 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:44.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[118593]: osdmap e103: 8 total, 7 up, 8 in 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[121907]: osdmap e103: 8 total, 7 up, 8 in 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:44.874 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[136489]: 2026-03-10T09:56:44.726+0000 7f49dc5f2740 -1 Falling back to public interface 2026-03-10T09:56:45.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:44 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:45.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:44 vm03 ceph-mon[123760]: osdmap e103: 8 total, 7 up, 8 in 2026-03-10T09:56:45.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: pgmap v91: 161 pgs: 58 peering, 
103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 8 pgs inactive, 8 pgs peering (PG_AVAILABILITY) 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.785 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.785 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.786 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.786 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.786 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: pgmap v91: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 8 pgs inactive, 8 pgs peering (PG_AVAILABILITY) 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: pgmap v91: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 8 pgs inactive, 8 pgs peering (PG_AVAILABILITY) 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:45.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:45.871 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[136489]: 2026-03-10T09:56:45.667+0000 7f49dc5f2740 -1 osd.0 101 log_to_monitors true 2026-03-10T09:56:46.740 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v92: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v93: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v94: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v95: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v96: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v97: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v98: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v99: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v100: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v101: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v102: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v103: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v104: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: pgmap v105: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v92: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v93: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v94: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v95: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v96: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v97: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v98: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v99: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v100: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v101: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v102: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v103: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v104: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: pgmap v105: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v92: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v93: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v94: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v95: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v96: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v97: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v98: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v99: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v100: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v101: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v102: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v103: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v104: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: pgmap v105: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:46.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:47.298 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 
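Both Prometheus rule failures in this section (CephOSDFlapping near the top, CephNodeDiskspaceWarning here) are the same class of problem: while the upgrade is in flight the same metric exists under two label sets - ceph_osd_metadata for osd.0 appears once with instance="ceph_cluster" plus a cluster label and once with instance="192.168.123.103:9283" without one, and node_uname_info for vm03 appears with and without a cluster label - so the rules' on(...) group_left joins find duplicate series inside the match group and the engine refuses many-to-many matching. A quick way to see which match groups collide is to ask Prometheus directly. A minimal sketch, assuming the standard HTTP query API and that prometheus.a answers at vm03:9095 (per the daemon listing just below); the address is an assumption to substitute:

    # dup_series_check.py - illustrative only; PROM is an assumption.
    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    PROM = "http://vm03:9095"  # assumed prometheus.a endpoint, adjust as needed

    def instant_query(expr):
        # Standard Prometheus HTTP API: GET /api/v1/query?query=<expr>
        with urlopen(f"{PROM}/api/v1/query?{urlencode({'query': expr})}") as resp:
            return json.load(resp)["data"]["result"]

    # Any match-group label that maps to more than one series on the join's
    # right-hand side is exactly what PromQL rejects as many-to-many.
    for expr in ("count by (ceph_daemon) (ceph_osd_metadata) > 1",
                 "count by (instance) (node_uname_info) > 1"):
        for sample in instant_query(expr):
            print("duplicate series behind match group:", sample["metric"])

Once the stale scrape target disappears the duplicates age out and the rules evaluate again; deployments that must tolerate the overlap sometimes wrap the join's right-hand side in an aggregation such as max by (ceph_daemon) (...) so only one series per group survives, though that is a general PromQL workaround rather than anything this test changes.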
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 6s ago 10m - -
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 14s ago 10m - -
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (4m) 6s ago 8m 26.6M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (4m) 14s ago 8m 47.7M - dad864ee21e9 011f2081bf92
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (2m) 6s ago 7m 52.5M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (100s) 14s ago 9m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (3m) 6s ago 10m 562M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (95s) 6s ago 10m 60.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (70s) 14s ago 10m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (76s) 6s ago 10m 45.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (4m) 6s ago 8m 9671k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (4m) 14s ago 8m 9647k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 starting - - - 4096M
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (9m) 6s ago 9m 56.3M 4096M 17.2.0 e1d6a67b021e de45ae7a43f6
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (9m) 6s ago 9m 50.7M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (9m) 6s ago 9m 53.1M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (9m) 14s ago 9m 55.7M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (9m) 14s ago 9m 53.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (8m) 14s ago 8m 51.1M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (8m) 14s ago 8m 52.8M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (2m) 14s ago 8m 44.0M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T09:56:47.417 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (7m) 6s ago 7m 94.2M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:56:47.418 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (7m) 14s ago 7m 95.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:56:47.418 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (7m) 6s ago 7m 95.0M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:56:47.418 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (7m) 14s ago 7m 92.5M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:56:47.831 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 7
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 11,
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:56:47.832 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:56:47.971 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v106: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v107: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v108: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v109: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v110: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v111: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v112: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v113: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: pgmap v114: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: osdmap e104: 8 total, 7 up, 8 in 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
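[editor's note] The `ceph orch ps` table and `ceph versions` output above capture the cluster mid-upgrade: mons and mgrs are already on 19.2.3-678-ge911bdeb while the OSDs and RGWs still run 17.2.0, agent.vm03 is in "error", and osd.0 is "starting". A sketch of pulling the same information out programmatically, assuming jq is available on the admin host (the status_desc/daemon_type/daemon_id/hostname field names are assumed to match cephadm's JSON output):

    # Daemons not currently "running" (agent.vm03 and osd.0 in the table above):
    ceph orch ps --format json | jq -r \
        '.[] | select(.status_desc != "running")
             | "\(.daemon_type).\(.daemon_id) on \(.hostname): \(.status_desc)"'
    # Daemon count per ceph version, matching the "overall" section above:
    ceph versions | jq -r '.overall | to_entries[] | "\(.value)\t\(.key)"'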
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:47.972 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v106: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v107: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v108: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v109: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v110: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v111: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v112: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v113: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: pgmap v114: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: osdmap e104: 8 total, 7 up, 8 in
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:47.973 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v106: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v107: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v108: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v109: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v110: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v111: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v112: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v113: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: pgmap v114: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: osdmap e104: 8 total, 7 up, 8 in
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:48.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:48.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "in_progress": true,
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "services_complete": [
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:        "mon",
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:        "mgr"
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    ],
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "progress": "5/23 daemons upgraded",
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "message": "Currently upgrading osd daemons",
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:    "is_paused": false
2026-03-10T09:56:48.109 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:56:48.425 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; 1 osds down; Reduced data availability: 8 pgs inactive, 8 pgs peering
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] OSD_DOWN: 1 osds down
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    osd.0 (root=default,host=vm00) is down
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] PG_AVAILABILITY: Reduced data availability: 8 pgs inactive, 8 pgs peering
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 4.6 is stuck peering for 7m, current state peering, last acting [5,2]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 4.1e is stuck peering for 8m, current state peering, last acting [7,3]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 5.3 is stuck peering for 8m, current state peering, last acting [6,5]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 5.1e is stuck peering for 8m, current state peering, last acting [7,2]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 6.0 is stuck peering for 7m, current state peering, last acting [3,2]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 6.9 is stuck peering for 7m, current state peering, last acting [7,2]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 6.10 is stuck peering for 7m, current state peering, last acting [5,1]
2026-03-10T09:56:48.426 INFO:teuthology.orchestra.run.vm00.stdout:    pg 6.16 is stuck peering for 7m, current state peering, last acting [7,3]
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='client.34200 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: pgmap v116: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: pgmap v117: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='client.54140 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: pgmap v118: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: pgmap v119: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/1089827567' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='client.54158 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: pgmap v120: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/1283195487' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: pgmap v121: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='client.34200 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: pgmap v116: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: pgmap v117: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='client.54140 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: pgmap v118: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: pgmap v119: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/1089827567' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='client.54158 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: pgmap v120: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/1283195487' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: pgmap v121: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='client.34200 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: pgmap v116: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: pgmap v117: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.087 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='client.54140 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: pgmap v118: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: pgmap v119: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/1089827567' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='client.54158 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: pgmap v120: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/1283195487' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: pgmap v121: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:49.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:48 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:49.088 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 09:56:48 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[136489]: 2026-03-10T09:56:48.751+0000 7f49d3b9c640 -1 osd.0 101 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: OSD bench result of 14178.649279 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
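[editor's note] At this point the run shows three overlapping conditions: the upgrade reports "5/23 daemons upgraded" with OSDs in progress, health reports CEPHADM_AGENT_DOWN for vm03 and osd.0 down, and the PGs around osd.0's restart are recovering. A sketch of how one might watch and nudge this by hand; the commands are standard cephadm CLI, but restarting the agent as a remediation is an assumption, not something the test does:

    # agent.vm03 shows "error" in the orch ps output above:
    ceph orch ps --daemon-type agent
    ceph orch daemon restart agent.vm03
    # Poll until the upgrade reported above ("in_progress": true) finishes;
    # assumes jq ('jq -e' exits non-zero once in_progress turns false):
    while ceph orch upgrade status | jq -e '.in_progress' >/dev/null; do
        sleep 30
    done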
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded (PG_DEGRADED)
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 8 pgs inactive, 8 pgs peering)
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: pgmap v122: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: pgmap v123: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:50.407 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: pgmap v124: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: pgmap v125: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 3 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: pgmap v126: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 3 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: pgmap v127: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: OSD bench result of 14178.649279 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
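[editor's note] The OSD bench warning above fires because the measured 14178.6 IOPS falls outside the 50-500 IOPS sanity band for an OSD classed as hdd, so mclock keeps the capacity at 315 IOPS. Following the log's own recommendation, the capacity can be pinned per OSD after benchmarking with an external tool; the value below is illustrative, not a measured result:

    # Benchmark the device with e.g. fio first, then override per-OSD
    # (315 is just the current value the warning reports as unchanged):
    ceph config set osd.0 osd_mclock_max_capacity_iops_hdd 315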
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded (PG_DEGRADED)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 8 pgs inactive, 8 pgs peering)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: pgmap v122: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: pgmap v123: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: pgmap v124: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.5 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: pgmap v125: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 3 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: pgmap v126: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 3 op/s; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: pgmap v127: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:50.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: OSD bench result of 14178.649279 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
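[editor's note] The pgmap lines above settle at 36 active+undersized and 22 active+undersized+degraded PGs while osd.0 comes back; a simple way to watch them drain back to active+clean (plain shell, nothing test-specific):

    watch -n 5 'ceph pg stat'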
2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 69/627 objects degraded (11.005%), 22 pgs degraded (PG_DEGRADED) 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 8 pgs inactive, 8 pgs peering) 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: pgmap v122: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%) 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: pgmap v123: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 2 op/s; 69/627 objects degraded (11.005%) 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:50.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
2026-03-10T09:56:51.048 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:56:50] "GET /metrics HTTP/1.1" 200 37961 "" "Prometheus/2.51.0"
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: osd.0 [v2:192.168.123.100:6802/3985073503,v1:192.168.123.100:6803/3985073503] boot
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: osdmap e105: 8 total, 8 up, 8 in
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: pgmap v129: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:51.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: pgmap v130: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: pgmap v131: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:51.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:51 vm00 ceph-mon[118593]: osdmap e106: 8 total, 8 up, 8 in
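The four-command pattern that repeats throughout this journal (config dump, versions, config generate-minimal-conf, auth get, each logged as an audit dispatch from mgr.y) is consistent with the cephadm mgr module refreshing its view of the cluster and the config material it distributes to hosts between upgrade steps. These are ordinary monitor commands and can be reproduced from the CLI on any node with an admin keyring, e.g.:

  ceph versions                      # per-daemon version breakdown, as in the "versions" dispatches
  ceph config generate-minimal-conf  # the minimal ceph.conf cephadm writes out to hosts
  ceph auth get client.admin         # the keyring fetch behind the "auth get" dispatches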
2026-03-10T09:56:52.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: pgmap v132: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:52.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:52.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: pgmap v134: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
2026-03-10T09:56:52.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:56:52.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:52.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:56:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:56:52.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:52 vm00 ceph-mon[118593]: pgmap v135: 161 pgs: 36 active+undersized, 22 active+undersized+degraded, 103 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 69/627 objects degraded (11.005%)
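As a sanity check on the pgmap arithmetic, the degraded percentages in these entries follow directly from the object counts: 69 of 627 object instances short of full replication before recovery, 38 of 627 afterwards. A one-liner to verify with any POSIX awk:

  awk 'BEGIN { printf "%.3f%%\n", 69/627*100 }'   # -> 11.005%
  awk 'BEGIN { printf "%.3f%%\n", 38/627*100 }'   # -> 6.061%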
2026-03-10T09:56:53.191 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:56:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:56:52.937+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (14 PGs are or would become offline)
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v136: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v137: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v138: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v139: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v140: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.449 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v141: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v142: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v143: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v144: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: pgmap v145: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%)
2026-03-10T09:56:53.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:56:53.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch
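The "osd ok-to-stop" dispatch is the upgrade's safety gate: the earlier EBUSY reply from mgr.y ("unsafe to stop osd(s) at this time (14 PGs are or would become offline)") shows the same check refusing to take an OSD down, and here it is retried for osd.1 once recovery has caught up. The check is an ordinary monitor command whose exit status reflects safety, so the gate can be reproduced by hand, e.g. (a sketch):

  # Exit status is non-zero while stopping the OSD would leave PGs offline.
  if ceph osd ok-to-stop 1; then
      ceph orch daemon restart osd.1    # safe to proceed
  else
      echo "osd.1 not safe to stop yet; waiting for recovery"
  fi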
2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: pgmap v142: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: pgmap v143: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: pgmap v144: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: pgmap v145: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:53.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:56:53.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:56:54.455 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:54.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", 
public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T09:56:54.455 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:54.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: pgmap v146: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: pgmap v147: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: pgmap v146: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: pgmap v147: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: pgmap v146: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: pgmap v147: 161 pgs: 24 active+undersized, 13 active+undersized+degraded, 124 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T09:56:54.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:56:54.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:54.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:56.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:55 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 38/627 objects degraded (6.061%), 13 pgs degraded) 2026-03-10T09:56:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:55 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 38/627 objects degraded (6.061%), 13 pgs degraded) 2026-03-10T09:56:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:55 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 38/627 objects degraded (6.061%), 13 pgs degraded) 2026-03-10T09:56:56.948 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:56 vm03 ceph-mon[123760]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T09:56:57.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:56 vm00 ceph-mon[118593]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T09:56:57.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:56 vm00 ceph-mon[121907]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T09:56:57.297 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:56:56 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:56:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, 
{__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:56:58.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:58 vm03 ceph-mon[123760]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:56:58.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:56:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:58 vm00 ceph-mon[118593]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:56:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:56:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:56:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:58 vm00 ceph-mon[121907]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:56:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:56:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:00.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:00 vm03 ceph-mon[123760]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:57:00.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:00 vm00 ceph-mon[118593]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:57:00.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:00 vm00 ceph-mon[121907]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 1 op/s 2026-03-10T09:57:01.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:57:00] "GET /metrics HTTP/1.1" 200 37961 "" "Prometheus/2.51.0" 2026-03-10T09:57:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:02 vm03 ceph-mon[123760]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:57:02.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:02 vm00 ceph-mon[118593]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:02.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:02.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: 
dispatch 2026-03-10T09:57:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:02 vm00 ceph-mon[121907]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:02.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:57:04.496 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:04 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:04.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T09:57:04.497 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:04 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:57:04.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:04 vm03 ceph-mon[123760]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:57:04.497 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:04 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:04 vm00 ceph-mon[118593]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:57:04.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:04 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:04.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:04 vm00 ceph-mon[121907]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:57:04.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:04 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:06.244 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:06 vm03 ceph-mon[123760]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:06.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:06 vm00 ceph-mon[118593]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:06.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:06 vm00 ceph-mon[121907]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:07.298 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:06 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:57:08.514 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:08 vm00 ceph-mon[118593]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:57:08.514 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:08.514 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:08 vm00 ceph-mon[121907]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:57:08.514 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:08 vm03 ceph-mon[123760]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:57:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:09.069 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 systemd[1]: Stopping Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
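The stop beginning at 09:57:09 is the upgrade's per-OSD safety gate finally passing: the mgr has been repeatedly dispatching the mon command cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}] (visible throughout the entries above), and while 13 PGs were still degraded the mons refused with "Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline)". Once PG_DEGRADED cleared and all 161 PGs returned to active+clean, the same check succeeds and cephadm proceeds to restart osd.1 on the target image. A minimal sketch of the equivalent manual probe is below; the command is the same one the mgr dispatches, while the quoted refusal text is taken from this log rather than guaranteed by the CLI contract:

    # Ask the mons whether osd.1 can be stopped without making PGs unavailable
    # (the same "osd ok-to-stop" check the mgr dispatches during the upgrade).
    ceph osd ok-to-stop 1
    # While PGs were degraded the check failed, matching the log:
    #   unsafe to stop osd(s) at this time (14 PGs are or would become offline)
    # After recovery (161 active+clean) it passes and the upgrade moves on to
    # "Upgrade: Updating osd.1".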
2026-03-10T09:57:09.339 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[62684]: 2026-03-10T09:57:09.139+0000 7f0184733700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:57:09.339 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[62684]: 2026-03-10T09:57:09.139+0000 7f0184733700 -1 osd.1 106 *** Got signal Terminated *** 2026-03-10T09:57:09.339 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[62684]: 2026-03-10T09:57:09.139+0000 7f0184733700 -1 osd.1 106 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T09:57:09.619 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139575]: 2026-03-10 09:57:09.388097352 +0000 UTC m=+0.259509939 container died de45ae7a43f6f6102c0349bf1a462e91749d937a34af16f5c1523fe0889fcd16 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, build-date=2022-05-03T08:36:31.336870, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, io.openshift.expose-services=, vcs-type=git, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_CLEAN=True, distribution-scope=public, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, release=754) 2026-03-10T09:57:09.619 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139575]: 2026-03-10 09:57:09.40803979 +0000 UTC m=+0.279452377 container remove de45ae7a43f6f6102c0349bf1a462e91749d937a34af16f5c1523fe0889fcd16 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, io.openshift.expose-services=, release=754, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, name=centos-stream, distribution-scope=public, maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, vcs-type=git, io.buildah.version=1.19.8, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, ceph=True, RELEASE=HEAD, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T09:57:09.619 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 bash[139575]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1 2026-03-10T09:57:09.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139641]: 2026-03-10 09:57:09.551896044 +0000 UTC m=+0.017764724 container create c962f5e4ac805e1f73d833c7903b4726fa57a5e1dedae98268b53225f0a0453b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS) 2026-03-10T09:57:09.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139641]: 2026-03-10 09:57:09.584407599 +0000 UTC m=+0.050276279 container init c962f5e4ac805e1f73d833c7903b4726fa57a5e1dedae98268b53225f0a0453b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223) 2026-03-10T09:57:09.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139641]: 2026-03-10 09:57:09.587703128 +0000 UTC m=+0.053571808 container start c962f5e4ac805e1f73d833c7903b4726fa57a5e1dedae98268b53225f0a0453b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-10T09:57:09.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139641]: 2026-03-10 09:57:09.589210319 +0000 UTC m=+0.055078999 container attach c962f5e4ac805e1f73d833c7903b4726fa57a5e1dedae98268b53225f0a0453b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: Upgrade: osd.1 is safe to restart 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: Upgrade: Updating osd.1 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: Deploying daemon osd.1 on vm00 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[118593]: osd.1 marked itself down and dead 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: Upgrade: osd.1 is safe to restart 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: Upgrade: Updating osd.1 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: Deploying daemon osd.1 on vm00 2026-03-10T09:57:09.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:09 vm00 ceph-mon[121907]: osd.1 marked itself down and dead 2026-03-10T09:57:09.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:09.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: Upgrade: osd.1 is safe to restart 2026-03-10T09:57:09.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: Upgrade: Updating osd.1 2026-03-10T09:57:09.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T09:57:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: Deploying daemon osd.1 on vm00 2026-03-10T09:57:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:09 vm03 ceph-mon[123760]: osd.1 marked itself down and dead 2026-03-10T09:57:09.897 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139641]: 2026-03-10 09:57:09.545569512 +0000 UTC m=+0.011438201 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:57:09.897 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139659]: 2026-03-10 09:57:09.744122898 +0000 UTC m=+0.010396290 container died c962f5e4ac805e1f73d833c7903b4726fa57a5e1dedae98268b53225f0a0453b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-10T09:57:09.897 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 podman[139659]: 2026-03-10 09:57:09.760475206 +0000 UTC m=+0.026748598 container remove c962f5e4ac805e1f73d833c7903b4726fa57a5e1dedae98268b53225f0a0453b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T09:57:09.897 
INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Deactivated successfully.
2026-03-10T09:57:09.897 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 systemd[1]: Stopped Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:57:09.897 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Consumed 4.165s CPU time.
2026-03-10T09:57:10.162 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:09 vm00 systemd[1]: Starting Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:57:10.162 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 podman[139747]: 2026-03-10 09:57:10.071701507 +0000 UTC m=+0.018605668 container create 41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, OSD_FLAVOR=default)
2026-03-10T09:57:10.162 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 podman[139747]: 2026-03-10 09:57:10.118184255 +0000 UTC m=+0.065088406 container init 41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:57:10.162 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 podman[139747]: 2026-03-10 09:57:10.121559122 +0000 UTC m=+0.068463272 container start 41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:57:10.162 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 podman[139747]: 2026-03-10 09:57:10.12286693 +0000 UTC m=+0.069771091 container attach 41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
2026-03-10T09:57:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:10 vm00 ceph-mon[118593]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:57:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:10 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:57:10.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:10 vm00 ceph-mon[118593]: osdmap e107: 8 total, 7 up, 8 in
2026-03-10T09:57:10.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:10 vm00 ceph-mon[121907]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:57:10.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:10 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:57:10.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:10 vm00 ceph-mon[121907]: osdmap e107: 8 total, 7 up, 8 in
2026-03-10T09:57:10.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 podman[139747]: 2026-03-10 09:57:10.063722091 +0000 UTC m=+0.010626262 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:57:10.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:10.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:10.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:10.620 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:10 vm03 ceph-mon[123760]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:57:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:10 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:57:10.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:10 vm03 ceph-mon[123760]: osdmap e107: 8 total, 7 up, 8 in
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b --path /var/lib/ceph/osd/ceph-1 --no-mon-config
2026-03-10T09:57:11.054 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:10 vm00 bash[139747]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b --path /var/lib/ceph/osd/ceph-1 --no-mon-config
2026-03-10T09:57:11.055 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:57:10] "GET /metrics HTTP/1.1" 200 38109 "" "Prometheus/2.51.0"
2026-03-10T09:57:11.372 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/ln -snf /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:11.372 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 bash[139747]: Running command: /usr/bin/ln -snf /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:11.372 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:11.372 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 bash[139747]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:11.372 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
2026-03-10T09:57:11.372 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 bash[139747]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 bash[139747]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[139758]: --> ceph-volume lvm activate successful for osd ID: 1
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 bash[139747]: --> ceph-volume lvm activate successful for osd ID: 1
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 conmon[139758]: conmon 41bc8998819f9e191ee0 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f.scope/container/memory.events
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 podman[139747]: 2026-03-10 09:57:11.087636748 +0000 UTC m=+1.034540909 container died 41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 podman[139747]: 2026-03-10 09:57:11.10644929 +0000 UTC m=+1.053353451 container remove 41bc8998819f9e191ee07dd6edcbe26eb14aef6e83eb3588c317b6d4ed43e56f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0)
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 podman[140003]: 2026-03-10 09:57:11.209032719 +0000 UTC m=+0.019154936 container create df062f9af9f3402122ae97b70a2dce29773367d7cf3e466f04186b33a0ee2f32 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid)
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 podman[140003]: 2026-03-10 09:57:11.240669838 +0000 UTC m=+0.050792064 container init df062f9af9f3402122ae97b70a2dce29773367d7cf3e466f04186b33a0ee2f32 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3)
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 podman[140003]: 2026-03-10 09:57:11.24729536 +0000 UTC m=+0.057417577 container start df062f9af9f3402122ae97b70a2dce29773367d7cf3e466f04186b33a0ee2f32 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223)
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 bash[140003]: df062f9af9f3402122ae97b70a2dce29773367d7cf3e466f04186b33a0ee2f32
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 podman[140003]: 2026-03-10 09:57:11.201251554 +0000 UTC m=+0.011373781 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:57:11.373 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 systemd[1]: Started Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
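The stop/activate/start cycle above is cephadm recreating the osd.1 unit: the raw activator finds no matching OSD (this OSD is LVM-backed), ceph-volume falls through to lvm activate, primes /var/lib/ceph/osd/ceph-1, and systemd then starts the long-running osd container. A minimal sketch for checking the same state by hand on vm00, with the fsid and daemon id taken from the log above (the systemctl/cephadm/journalctl invocations are standard tooling, not commands captured in this run):

    # systemd unit cephadm manages for this daemon
    systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service
    # LVM-backed OSDs ceph-volume can see on this host (explains the raw-activate miss)
    cephadm ceph-volume lvm list
    # follow the same journal stream teuthology is capturing
    journalctl -fu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service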
2026-03-10T09:57:11.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:11 vm03 ceph-mon[123760]: osdmap e108: 8 total, 7 up, 8 in
2026-03-10T09:57:11.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:11.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:11.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:11 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[118593]: osdmap e108: 8 total, 7 up, 8 in
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[121907]: osdmap e108: 8 total, 7 up, 8 in
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:11.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:11 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:11.870 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:11 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:11.568+0000 7f093014e740 -1 Falling back to public interface
2026-03-10T09:57:12.592 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:12.468+0000 7f093014e740 -1 osd.1 0 read_superblock omap replica is missing.
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v158: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v159: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 784 B/s rd, 0 op/s
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v160: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v161: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
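The stale+active+clean PGs and the osd.1 read_superblock complaint above cover the window in which osd.1 is running again as a process but has not yet rejoined the cluster; the same view the monitors are logging can be polled interactively. A sketch with stock ceph CLI (none of these invocations are specific to this run):

    ceph -s              # overall status; shows the OSD_DOWN warning while osd.1 restarts
    ceph health detail   # expands OSD_DOWN / PG_DEGRADED into the affected items
    ceph osd tree down   # the CRUSH tree filtered to OSDs the osdmap still marks down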
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v162: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v163: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v164: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: pgmap v165: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.593 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
"client.admin"}]: dispatch 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v158: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v159: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 784 B/s rd, 0 op/s 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v160: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v161: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v162: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v163: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v164: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: pgmap v165: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:57:12.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v158: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v159: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 784 B/s rd, 0 op/s 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v160: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v161: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v162: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v163: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v164: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: pgmap v165: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
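The long runs of config dump / versions / config generate-minimal-conf / auth get dispatches that all three monitors log above are the cephadm mgr module (mgr.y) refreshing its cached cluster state after the daemon restart; the interleaved "Metadata not up to date on all hosts. Skipping non agent specs" messages are the same serve loop declining to act on specs while host metadata is stale. The identical data can be queried directly with stock commands; a sketch:

    ceph versions                       # daemon version histogram the module polls each refresh
    ceph config dump                    # full cluster configuration, as in the dispatched command
    ceph config generate-minimal-conf   # the minimal ceph.conf cephadm distributes to hosts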
"client.admin"}]: dispatch 2026-03-10T09:57:12.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:12.869 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:12 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:12.623+0000 7f093014e740 -1 osd.1 106 log_to_monitors true 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v166: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v167: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v168: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v169: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v170: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v171: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v172: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v173: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v174: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v175: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v176: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v177: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v178: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v179: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 6/627 objects degraded (0.957%), 1 pg degraded (PG_DEGRADED) 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v180: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: pgmap v181: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v166: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v167: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v168: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v169: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v170: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v171: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v172: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v173: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v174: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v175: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v176: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v177: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v178: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v179: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.467 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 6/627 objects degraded (0.957%), 1 pg degraded (PG_DEGRADED) 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v180: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: pgmap v181: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:13.468 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:13.787 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:13.695+0000 7f0927ef9640 -1 osd.1 106 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v166: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v167: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v168: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v169: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v170: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v171: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%) 2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
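The set_numa_affinity error above shows osd.1 failing to resolve its (empty) public interface to a NUMA node, which is expected on VPS guests that expose no NUMA topology and is harmless in this run. A minimal sketch of how the warning could be silenced, assuming the osd_numa_auto_affinity option present in recent releases (verify the option exists on the deployed version before applying):

    # Assumed remediation: disable automatic NUMA pinning for all OSDs.
    ceph config set osd osd_numa_auto_affinity false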
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v172: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v173: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v174: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v175: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v176: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v177: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v178: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v179: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 6/627 objects degraded (0.957%), 1 pg degraded (PG_DEGRADED)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v180: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: pgmap v181: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:13.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:13.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.461 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:57:14.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v182: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
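The CephOSDFlapping evaluation failure above is a PromQL many-to-many error: during the upgrade, ceph_osd_metadata is being collected from two instances (instance="ceph_cluster" and instance="192.168.123.103:9283"), so the group_left join sees duplicate right-hand series for osd.1. A quick way to confirm which daemons are duplicated, assuming the Prometheus HTTP API is reachable (the vm03:9095 endpoint is an assumption; substitute whatever endpoint cephadm actually deployed):

    # Count metadata series per ceph_daemon; any result > 1 confirms the duplicate join input.
    curl -sG 'http://vm03:9095/api/v1/query' \
      --data-urlencode 'query=count by (ceph_daemon) (ceph_osd_metadata) > 1'

One common way rule authors avoid this is to aggregate the right-hand side first, e.g. max by (ceph_daemon, hostname) (ceph_osd_metadata), so each match group is unique; the warning here is transient and clears once only one exporter endpoint remains.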
2026-03-10T09:57:14.623 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v183: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v184: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v185: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v186: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v187: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: pgmap v188: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: osdmap e109: 8 total, 7 up, 8 in
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
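The set-device-class/create-or-move pair above is the standard OSD boot sequence: the restarted osd.1 re-registers its device class and reasserts its CRUSH position under host=vm00. The equivalent manual CLI calls, shown only as an illustration (the daemon issues these itself on startup, so there is normally nothing to run by hand):

    # What osd.1 just did, expressed as admin commands.
    ceph osd crush set-device-class hdd osd.1
    ceph osd crush create-or-move osd.1 0.0195 host=vm00 root=default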
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v182: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v183: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v184: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v185: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v186: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v187: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: pgmap v188: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:14.624 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: osdmap e109: 8 total, 7 up, 8 in
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v182: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v183: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v184: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v185: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v186: 161 pgs: 5 active+undersized, 20 stale+active+clean, 1 active+undersized+degraded, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 6/627 objects degraded (0.957%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v187: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: pgmap v188: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: osdmap e109: 8 total, 7 up, 8 in
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: pgmap v190: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: pgmap v191: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: pgmap v192: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: pgmap v193: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: Upgrade: osd.1 is safe to restart
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535] boot
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: osdmap e110: 8 total, 8 up, 8 in
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T09:57:15.511 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: pgmap v190: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: pgmap v191: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: pgmap v192: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: pgmap v193: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: Upgrade: osd.1 is safe to restart 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535] boot 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: osdmap e110: 8 total, 8 up, 8 in 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T09:57:15.512 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:15.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: pgmap v190: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: pgmap v191: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 127 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: pgmap v192: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: pgmap v193: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: Upgrade: osd.1 is safe to restart 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: osd.1 [v2:192.168.123.100:6810/4194415535,v1:192.168.123.100:6811/4194415535] boot 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: osdmap e110: 8 total, 8 up, 8 in 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T09:57:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:16.439 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 systemd[1]: Stopping Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
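In the exchange above, the mgr dispatches "osd ok-to-stop" and the mon answers "Upgrade: osd.1 is safe to restart"; this is the safety gate cephadm applies before it restarts each OSD during an upgrade. A minimal sketch of querying the same gate by hand, assuming a node with the ceph CLI and an admin keyring (the helper name is illustrative, not part of cephadm):

import subprocess

def osd_ok_to_stop(osd_id: int, max_osds: int = 16) -> bool:
    # Mirrors the mon command seen in the log:
    # {"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}.
    # A non-zero exit means stopping the OSD would leave PGs without
    # enough replicas, so the upgrade would hold off.
    result = subprocess.run(
        ["ceph", "osd", "ok-to-stop", str(osd_id), "--max", str(max_osds)],
        capture_output=True, text=True,
    )
    return result.returncode == 0

if osd_ok_to_stop(1):
    print("osd.1 is safe to restart")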
2026-03-10T09:57:16.439 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:16.344+0000 7f092d0e3640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:57:16.439 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:16.344+0000 7f092d0e3640 -1 osd.1 111 *** Got signal Terminated ***
2026-03-10T09:57:16.439 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[140013]: 2026-03-10T09:57:16.344+0000 7f092d0e3640 -1 osd.1 111 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[118593]: Upgrade: Updating osd.1
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[118593]: Deploying daemon osd.1 on vm00
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[118593]: osdmap e111: 8 total, 8 up, 8 in
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[118593]: osd.1 marked itself down and dead
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[121907]: Upgrade: Updating osd.1
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[121907]: Deploying daemon osd.1 on vm00
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[121907]: osdmap e111: 8 total, 8 up, 8 in
2026-03-10T09:57:16.747 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:16 vm00 ceph-mon[121907]: osd.1 marked itself down and dead
2026-03-10T09:57:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:16 vm03 ceph-mon[123760]: Upgrade: Updating osd.1
2026-03-10T09:57:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:16 vm03 ceph-mon[123760]: Deploying daemon osd.1 on vm00
2026-03-10T09:57:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:16 vm03 ceph-mon[123760]: osdmap e111: 8 total, 8 up, 8 in
2026-03-10T09:57:16.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:16 vm03 ceph-mon[123760]: osd.1 marked itself down and dead
2026-03-10T09:57:17.062 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 podman[141783]: 2026-03-10 09:57:16.747908117 +0000 UTC m=+0.436921259 container died df062f9af9f3402122ae97b70a2dce29773367d7cf3e466f04186b33a0ee2f32 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3)
2026-03-10T09:57:17.063 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 podman[141783]: 2026-03-10 09:57:16.803216145 +0000 UTC m=+0.492229277 container remove df062f9af9f3402122ae97b70a2dce29773367d7cf3e466f04186b33a0ee2f32 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223)
2026-03-10T09:57:17.063 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:16 vm00 bash[141783]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1
2026-03-10T09:57:17.298 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T09:57:17.321 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.128817592 +0000 UTC m=+0.045596228 container create a0bfb5f6a5cf6275b9fba56aa28fd5726f9a0942a033ea1c60769a20bd58b48d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True)
2026-03-10T09:57:17.321 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.183159922 +0000 UTC m=+0.099938567 container init a0bfb5f6a5cf6275b9fba56aa28fd5726f9a0942a033ea1c60769a20bd58b48d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True)
2026-03-10T09:57:17.321 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.193249757 +0000 UTC m=+0.110028384 container start a0bfb5f6a5cf6275b9fba56aa28fd5726f9a0942a033ea1c60769a20bd58b48d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
2026-03-10T09:57:17.321 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.202945707 +0000 UTC m=+0.119724343 container attach a0bfb5f6a5cf6275b9fba56aa28fd5726f9a0942a033ea1c60769a20bd58b48d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:57:17.321 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.112545193 +0000 UTC m=+0.029323840 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:57:17.584 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.349951074 +0000 UTC m=+0.266729711 container died a0bfb5f6a5cf6275b9fba56aa28fd5726f9a0942a033ea1c60769a20bd58b48d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
2026-03-10T09:57:17.584 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[141968]: 2026-03-10 09:57:17.369550221 +0000 UTC m=+0.286328857 container remove a0bfb5f6a5cf6275b9fba56aa28fd5726f9a0942a033ea1c60769a20bd58b48d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid)
2026-03-10T09:57:17.584 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Deactivated successfully.
2026-03-10T09:57:17.584 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Unit process 141992 (conmon) remains running after unit stopped.
2026-03-10T09:57:17.584 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Unit process 142018 (podman) remains running after unit stopped.
2026-03-10T09:57:17.584 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 systemd[1]: Stopped Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
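The stop sequence above shows the packaging cephadm uses for every daemon: a podman container wrapped in a systemd unit named ceph-<fsid>@<daemon>.service, so systemd drives start and stop while podman owns the container lifecycle. A sketch of composing and inspecting that unit name, using the fsid from this run (the helper function is illustrative):

import subprocess

FSID = "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"  # cluster fsid, taken from the log above

def unit_name(daemon: str) -> str:
    # cephadm names its units ceph-<fsid>@<daemon>.service; the
    # ceph-e2d4b2ee-...@osd.1.service unit stopped above follows this pattern.
    return f"ceph-{FSID}@{daemon}.service"

# Inspect the osd.1 unit that was just stopped and is about to be redeployed.
subprocess.run(["systemctl", "status", unit_name("osd.1"), "--no-pager"])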
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[118593]: pgmap v196: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[118593]: osdmap e112: 8 total, 7 up, 8 in
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[121907]: pgmap v196: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[121907]: osdmap e112: 8 total, 7 up, 8 in
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:17.584 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:57:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:17 vm03 ceph-mon[123760]: pgmap v196: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%)
2026-03-10T09:57:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:17 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:57:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:17 vm03 ceph-mon[123760]: osdmap e112: 8 total, 7 up, 8 in
2026-03-10T09:57:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:57:17.863 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 systemd[1]: Starting Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
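The OSD_DOWN warning raised here is expected to flap during the upgrade: the old osd.1 container is gone, the new one has not booted yet, and the check clears once the daemon reports in, just as it did for the previous restart above. A sketch of how a harness might wait out that window, assuming `ceph health detail --format json` is available; the polling loop and interval are illustrative:

import json
import subprocess
import time

def health_checks() -> dict:
    # With --format json, `ceph health detail` returns an object whose
    # "checks" map is keyed by names like OSD_DOWN or PG_DEGRADED.
    out = subprocess.run(
        ["ceph", "health", "detail", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return json.loads(out).get("checks", {})

while "OSD_DOWN" in health_checks():
    time.sleep(5)  # redeploy of osd.1 is in flight; wait for it to boot
print("OSD_DOWN cleared")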
2026-03-10T09:57:17.863 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[142164]: 2026-03-10 09:57:17.823124442 +0000 UTC m=+0.023111973 container create f17f5fd1ab97f136d9925f0c24fedcdb266679957c096fd75e35e5ffc5369bad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[142164]: 2026-03-10 09:57:17.88234981 +0000 UTC m=+0.082337352 container init f17f5fd1ab97f136d9925f0c24fedcdb266679957c096fd75e35e5ffc5369bad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[142164]: 2026-03-10 09:57:17.893404053 +0000 UTC m=+0.093391584 container start f17f5fd1ab97f136d9925f0c24fedcdb266679957c096fd75e35e5ffc5369bad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3)
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[142164]: 2026-03-10 09:57:17.90162471 +0000 UTC m=+0.101612241 container attach f17f5fd1ab97f136d9925f0c24fedcdb266679957c096fd75e35e5ffc5369bad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:17 vm00 podman[142164]: 2026-03-10 09:57:17.810705937 +0000 UTC m=+0.010693477 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.121 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[118593]: osdmap e113: 8 total, 7 up, 8 in
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:18.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:18 vm00 ceph-mon[121907]: osdmap e113: 8 total, 7 up, 8 in
2026-03-10T09:57:18.858 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:57:18.858 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:57:18.858 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.858 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.858 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.858 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:18.858 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:57:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:19.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:18 vm03 ceph-mon[123760]: osdmap e113: 8 total, 7 up, 8 in
2026-03-10T09:57:19.120 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:19.120 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:19.120 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b --path /var/lib/ceph/osd/ceph-1 --no-mon-config
2026-03-10T09:57:19.120 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:18 vm00 bash[142164]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b --path /var/lib/ceph/osd/ceph-1 --no-mon-config
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 1s ago 10m - -
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 46s ago 10m - -
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (4m) 1s ago 8m 26.6M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (4m) 46s ago 8m 47.7M - dad864ee21e9 011f2081bf92
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (3m) 1s ago 8m 52.6M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (2m) 46s ago 10m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (4m) 1s ago 11m 565M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (2m) 1s ago 11m 63.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (102s) 46s ago 10m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (108s) 1s ago 10m 47.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (4m) 1s ago 8m 9667k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (4m) 46s ago 8m 9647k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (35s) 1s ago 10m 69.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (8s) 1s ago 10m 30.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e df062f9af9f3
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (9m) 1s ago 9m 52.1M 4096M 17.2.0 e1d6a67b021e dc86a99a0403
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (9m) 1s ago 9m 54.2M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (9m) 46s ago 9m 55.7M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (9m) 46s ago 9m 53.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (9m) 46s ago 9m 51.1M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (9m) 46s ago 9m 52.8M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (3m) 46s ago 8m 44.0M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (8m) 1s ago 8m 94.3M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (8m) 46s ago 8m 95.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (8m) 1s ago 8m 95.2M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:57:19.385 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (8m) 46s ago 8m 92.5M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/ln -snf /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 bash[142164]: Running command: /usr/bin/ln -snf /dev/ceph-b86ddfd2-2237-44fb-b1e4-096f4c84336a/osd-block-718beebe-c05a-490a-835a-00fdd797508b /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 bash[142164]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 bash[142164]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 bash[142164]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate[142188]: --> ceph-volume lvm activate successful for osd ID: 1
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 bash[142164]: --> ceph-volume lvm activate successful for osd ID: 1
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 podman[142164]: 2026-03-10 09:57:19.351077819 +0000 UTC m=+1.551065340 container died f17f5fd1ab97f136d9925f0c24fedcdb266679957c096fd75e35e5ffc5369bad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
2026-03-10T09:57:19.531 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 podman[142164]: 2026-03-10 09:57:19.375327479 +0000 UTC m=+1.575315010 container remove f17f5fd1ab97f136d9925f0c24fedcdb266679957c096fd75e35e5ffc5369bad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-activate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6,
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 10,
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:57:19.736 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:57:19.874 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 podman[142850]: 2026-03-10 09:57:19.570304131 +0000 UTC m=+0.023975840 container create 88805c559d1d70cbe551b17205fb1afea9ffba59937bb65bcb4dc2d935128aa9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223)
2026-03-10T09:57:19.874 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 podman[142850]: 2026-03-10 09:57:19.640731639 +0000 UTC m=+0.094403348 container init 88805c559d1d70cbe551b17205fb1afea9ffba59937bb65bcb4dc2d935128aa9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:57:19.874 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 podman[142850]: 2026-03-10 09:57:19.657156052 +0000 UTC m=+0.110827761 container start 88805c559d1d70cbe551b17205fb1afea9ffba59937bb65bcb4dc2d935128aa9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223)
2026-03-10T09:57:19.874 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 bash[142850]: 88805c559d1d70cbe551b17205fb1afea9ffba59937bb65bcb4dc2d935128aa9
2026-03-10T09:57:19.874 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 podman[142850]: 2026-03-10 09:57:19.562714213 +0000 UTC m=+0.016385922 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:57:19.874 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:19 vm00 systemd[1]: Started Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:57:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:19 vm00 ceph-mon[118593]: pgmap v199: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%)
2026-03-10T09:57:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:19 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 77/627 objects degraded (12.281%), 20 pgs degraded (PG_DEGRADED)
2026-03-10T09:57:19.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:19 vm00 ceph-mon[121907]: pgmap v199: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%)
2026-03-10T09:57:19.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:19 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 77/627 objects degraded (12.281%), 20 pgs degraded (PG_DEGRADED)
2026-03-10T09:57:20.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:19 vm03 ceph-mon[123760]: pgmap v199: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%)
2026-03-10T09:57:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:19 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 77/627 objects degraded (12.281%), 20 pgs degraded (PG_DEGRADED)
2026-03-10T09:57:20.051 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "mon",
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "mgr"
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "6/23 daemons upgraded",
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons",
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:57:20.052 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:57:20.583 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[142865]: 2026-03-10T09:57:20.379+0000 7fa8f969f740 -1 Falling back to public interface
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; 1 osds down; Degraded data redundancy: 77/627 objects degraded (12.281%), 20 pgs degraded
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] OSD_DOWN: 1 osds down
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: osd.1 (root=default,host=vm00) is down
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 77/627 objects degraded (12.281%), 20 pgs degraded
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.2 is active+undersized+degraded, acting [5,6]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.9 is active+undersized+degraded, acting [7,3]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.a is active+undersized+degraded, acting [3,7]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.10 is active+undersized+degraded, acting [2,0]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.0 is active+undersized+degraded, acting [2,6]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.4 is active+undersized+degraded, acting [2,5]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.8 is active+undersized+degraded, acting [3,7]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.a is active+undersized+degraded, acting [6,4]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.e is active+undersized+degraded, acting [7,4]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.16 is active+undersized+degraded, acting [5,7]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.18 is active+undersized+degraded, acting [3,0]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.19 is active+undersized+degraded, acting [3,4]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.1a is active+undersized+degraded, acting [4,2]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.1c is active+undersized+degraded, acting [5,4]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.9 is active+undersized+degraded, acting [4,3]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.d is active+undersized+degraded, acting [4,2]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.e is active+undersized+degraded, acting [4,6]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.14 is active+undersized+degraded, acting [3,7]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.1f is active+undersized+degraded, acting [6,5]
2026-03-10T09:57:20.678 INFO:teuthology.orchestra.run.vm00.stdout: pg 6.1a is active+undersized+degraded, acting [4,5]
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='client.44185 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='client.34230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='client.54173 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/568712322' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='client.44185 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='client.34230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='client.54173 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/568712322' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:20.870
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:20.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:20.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:57:20] "GET /metrics HTTP/1.1" 200 38190 "" "Prometheus/2.51.0" 2026-03-10T09:57:21.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='client.44185 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:57:21.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='client.34230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:57:21.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='client.54173 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:57:21.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/568712322' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.048 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:21.229 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:21 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:57:21.016+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T09:57:21.501 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:21 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[142865]: 2026-03-10T09:57:21.371+0000 7fa8f969f740 -1 osd.1 111 log_to_monitors true 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v200: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1008 B/s rd, 0 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v201: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v202: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v203: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='client.44206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v204: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v205: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 2 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: pgmap v206: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='client.? 
192.168.123.100:0/1118248578' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.796 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[118593]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v200: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1008 B/s rd, 0 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v201: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 
op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v202: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v203: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='client.44206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v204: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v205: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 2 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: pgmap v206: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/1118248578' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:21.797 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:21 vm00 ceph-mon[121907]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T09:57:22.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v200: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1008 B/s rd, 0 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v201: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 
op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:22.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v202: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v203: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='client.44206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v204: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 1 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v205: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 2 op/s; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: pgmap v206: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='client.? 
192.168.123.100:0/1118248578' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:21 vm03 ceph-mon[123760]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T09:57:22.119 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 09:57:22 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[142865]: 2026-03-10T09:57:22.000+0000 7fa8f144a640 -1 osd.1 111 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:57:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: pgmap v207: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded 
(12.281%) 2026-03-10T09:57:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: pgmap v208: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: pgmap v209: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: osdmap e114: 8 total, 7 up, 8 in 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: pgmap v207: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: pgmap v208: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: pgmap v209: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: osdmap e114: 8 total, 7 up, 8 in 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:57:22.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: pgmap v207: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: pgmap v208: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: pgmap v209: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 77/627 objects degraded (12.281%) 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: osdmap e114: 8 total, 7 up, 8 in 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: from='osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:57:23.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[118593]: osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758] boot 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[118593]: osdmap e115: 8 total, 8 up, 8 in 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[121907]: osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758] boot 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[121907]: osdmap e115: 8 total, 8 up, 8 in 2026-03-10T09:57:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:57:23.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:23 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:57:23.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:23 vm03 ceph-mon[123760]: osd.1 [v2:192.168.123.100:6810/2832215758,v1:192.168.123.100:6811/2832215758] boot 2026-03-10T09:57:23.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:23 vm03 ceph-mon[123760]: osdmap e115: 8 total, 8 up, 8 in 2026-03-10T09:57:23.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T09:57:24.300 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: 
ts=2026-03-10T09:57:24.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T09:57:24.300 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.103:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:57:25.037 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:24 vm03 ceph-mon[123760]: pgmap v211: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:25.037 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:24 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:25.037 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:24 vm03 ceph-mon[123760]: osdmap e116: 8 total, 8 up, 8 in 2026-03-10T09:57:25.037 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:24 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.037 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:24 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.037 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:24 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[118593]: pgmap v211: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[118593]: osdmap e116: 8 total, 8 up, 8 in 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[121907]: pgmap v211: 161 pgs: 42 active+undersized, 21 
active+undersized+degraded, 98 active+clean; 457 KiB data, 147 MiB used, 160 GiB / 160 GiB avail; 83/627 objects degraded (13.238%) 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[121907]: osdmap e116: 8 total, 8 up, 8 in 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:25.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:24 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:26.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:25 vm03 ceph-mon[123760]: pgmap v214: 161 pgs: 37 peering, 20 active+undersized, 6 active+undersized+degraded, 98 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s; 34/627 objects degraded (5.423%) 2026-03-10T09:57:26.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:25 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 34/627 objects degraded (5.423%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T09:57:26.016 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:25 vm03 ceph-mon[123760]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T09:57:26.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:25 vm00 ceph-mon[118593]: pgmap v214: 161 pgs: 37 peering, 20 active+undersized, 6 active+undersized+degraded, 98 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s; 34/627 objects degraded (5.423%) 2026-03-10T09:57:26.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:25 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 34/627 objects degraded (5.423%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T09:57:26.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:25 vm00 ceph-mon[118593]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T09:57:26.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:25 vm00 ceph-mon[121907]: pgmap v214: 161 pgs: 37 peering, 20 active+undersized, 6 active+undersized+degraded, 98 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s; 34/627 objects degraded (5.423%) 2026-03-10T09:57:26.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:25 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 34/627 objects degraded (5.423%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T09:57:26.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:25 vm00 ceph-mon[121907]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T09:57:27.047 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 09:57:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T09:57:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm03\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"e2d4b2ee-1c65-11f1-bae0-b525704df8fa\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm03\", job=\"node\", machine=\"x86_64\", nodename=\"vm03\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T09:57:27.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:26 vm03 ceph-mon[123760]: pgmap v215: 161 pgs: 37 peering, 20 active+undersized, 6 active+undersized+degraded, 98 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 923 B/s rd, 0 op/s; 34/627 objects degraded (5.423%) 2026-03-10T09:57:27.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:26 vm00 ceph-mon[118593]: pgmap v215: 161 pgs: 37 peering, 20 active+undersized, 6 active+undersized+degraded, 98 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 923 B/s rd, 0 op/s; 34/627 objects degraded (5.423%) 2026-03-10T09:57:27.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:26 vm00 ceph-mon[121907]: pgmap v215: 161 pgs: 37 peering, 20 active+undersized, 6 active+undersized+degraded, 98 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 923 B/s rd, 0 op/s; 34/627 objects degraded (5.423%) 2026-03-10T09:57:28.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:28 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 34/627 objects degraded (5.423%), 6 pgs degraded) 2026-03-10T09:57:28.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:28 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 34/627 objects degraded (5.423%), 6 pgs degraded) 2026-03-10T09:57:29.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:28 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 34/627 objects degraded (5.423%), 6 pgs degraded) 2026-03-10T09:57:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:29 vm00 ceph-mon[118593]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:29 vm00 ceph-mon[121907]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:30.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:29 vm03 ceph-mon[123760]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:31.119 
INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:57:30] "GET /metrics HTTP/1.1" 200 38190 "" "Prometheus/2.51.0" 2026-03-10T09:57:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:31 vm00 ceph-mon[118593]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:31.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:31 vm00 ceph-mon[121907]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:32.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:31 vm03 ceph-mon[123760]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:57:33.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:33 vm00 ceph-mon[118593]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:57:33.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:33 vm00 ceph-mon[121907]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:33.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:33.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:57:33.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:33 vm03 ceph-mon[123760]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:34.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:34 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:34.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:34 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:34.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:34 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T09:57:35.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:35 vm00 ceph-mon[118593]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 973 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering
2026-03-10T09:57:35.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:35 vm00 ceph-mon[121907]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 973 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering
2026-03-10T09:57:35.534 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:35 vm03 ceph-mon[123760]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 973 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering
2026-03-10T09:57:36.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:36 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:36.350 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:36 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:36.350 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:36 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:37.395 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:37 vm00 systemd[1]: Stopping Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:57:37.395 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:37 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[65085]: 2026-03-10T09:57:37.239+0000 7fe661a00700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:57:37.395 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:37 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[65085]: 2026-03-10T09:57:37.239+0000 7fe661a00700 -1 osd.2 116 *** Got signal Terminated ***
2026-03-10T09:57:37.395 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:37 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[65085]: 2026-03-10T09:57:37.239+0000 7fe661a00700 -1 osd.2 116 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: Upgrade: osd.2 is safe to restart
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: Upgrade: Updating osd.2
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: Deploying daemon osd.2 on vm00
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:37 vm03 ceph-mon[123760]: osd.2 marked itself down and dead
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: Upgrade: osd.2 is safe to restart
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: Upgrade: Updating osd.2
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: Deploying daemon osd.2 on vm00
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[118593]: osd.2 marked itself down and dead
2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.'
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: Upgrade: osd.2 is safe to restart 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: Upgrade: Updating osd.2 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: Deploying daemon osd.2 on vm00 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:37.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:37 vm00 ceph-mon[121907]: osd.2 marked itself down and dead 2026-03-10T09:57:38.464 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:38 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:38 vm00 ceph-mon[118593]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T09:57:38.464 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:38 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:38 vm00 ceph-mon[121907]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144593]: 2026-03-10 09:57:38.211088345 +0000 UTC m=+0.986235317 container died dc86a99a040321f376d0d01fcdf601021c2fa9817615b78a1f9b501360809eb5 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc., GIT_CLEAN=True, ceph=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, maintainer=Guillaume Abrioux , version=8, GIT_BRANCH=HEAD, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, io.openshift.expose-services=, release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144593]: 2026-03-10 09:57:38.233861807 +0000 UTC m=+1.009008769 container remove dc86a99a040321f376d0d01fcdf601021c2fa9817615b78a1f9b501360809eb5 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, io.openshift.tags=base centos centos-stream, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , release=754, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public, io.buildah.version=1.19.8, io.openshift.expose-services=, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, com.redhat.component=centos-stream-container, version=8, GIT_CLEAN=True, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_BRANCH=HEAD) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 bash[144593]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.376946398 +0000 UTC m=+0.021186373 container create ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.422045434 +0000 UTC m=+0.066285427 container init ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.425011264 +0000 UTC m=+0.069251248 container start ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T09:57:38.464 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.428006871 +0000 UTC m=+0.072247085 container attach ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.365989823 +0000 UTC m=+0.010229817 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 conmon[144669]: conmon ea4d58f8bc30c6384291 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6.scope/container/memory.events 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.563386221 +0000 UTC m=+0.207626205 container died ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144658]: 2026-03-10 09:57:38.584241062 +0000 UTC m=+0.228481046 container remove ea4d58f8bc30c63842918a3f9afafe1dc99ba144b2a3d2865d2d4604f3361cd6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service: Deactivated 
successfully. 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service: Unit process 144669 (conmon) remains running after unit stopped. 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 systemd[1]: Stopped Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:57:38.792 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service: Consumed 11.603s CPU time. 2026-03-10T09:57:38.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:38 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:57:38.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:38 vm03 ceph-mon[123760]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 systemd[1]: Starting Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144760]: 2026-03-10 09:57:38.901275129 +0000 UTC m=+0.017661175 container create 92b6ff3aab8960e0215f7a65518b5415dbbd77c2fbb522a9d8a7b2b13bee94e2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144760]: 2026-03-10 09:57:38.945784963 +0000 UTC m=+0.062171009 container init 92b6ff3aab8960e0215f7a65518b5415dbbd77c2fbb522a9d8a7b2b13bee94e2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144760]: 2026-03-10 09:57:38.949472414 +0000 UTC m=+0.065858460 container start 92b6ff3aab8960e0215f7a65518b5415dbbd77c2fbb522a9d8a7b2b13bee94e2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144760]: 2026-03-10 09:57:38.950439985 +0000 UTC m=+0.066826031 container attach 92b6ff3aab8960e0215f7a65518b5415dbbd77c2fbb522a9d8a7b2b13bee94e2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:38 vm00 podman[144760]: 2026-03-10 09:57:38.894086796 +0000 UTC m=+0.010472853 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:57:39.050 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:57:39.369 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:57:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:39 vm03 ceph-mon[123760]: pgmap v222: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:57:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:39 vm03 ceph-mon[123760]: osdmap e118: 8 total, 7 up, 8 in 2026-03-10T09:57:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:39 vm00 ceph-mon[118593]: pgmap v222: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:57:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:39 vm00 ceph-mon[118593]: osdmap e118: 8 total, 7 up, 8 in 2026-03-10T09:57:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:39 vm00 ceph-mon[121907]: pgmap v222: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 
2026-03-10T09:57:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:39 vm00 ceph-mon[121907]: osdmap e118: 8 total, 7 up, 8 in
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 --path /var/lib/ceph/osd/ceph-2 --no-mon-config
2026-03-10T09:57:39.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 --path /var/lib/ceph/osd/ceph-2 --no-mon-config
2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/ln -snf /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 /var/lib/ceph/osd/ceph-2/block
2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/ln -snf /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 /var/lib/ceph/osd/ceph-2/block
2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
2026-03-10T09:57:40.370
INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[144771]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 bash[144760]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 podman[144760]: 2026-03-10 09:57:39.93892988 +0000 UTC m=+1.055315926 container died 92b6ff3aab8960e0215f7a65518b5415dbbd77c2fbb522a9d8a7b2b13bee94e2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:39 vm00 podman[144760]: 2026-03-10 09:57:39.958999502 +0000 UTC m=+1.075385548 container remove 92b6ff3aab8960e0215f7a65518b5415dbbd77c2fbb522a9d8a7b2b13bee94e2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 podman[145010]: 2026-03-10 09:57:40.068978314 +0000 UTC m=+0.020515017 container create 12681fec7ba81e6e10765beab3775835e238844bc35691541edbbf1d71b69147 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True) 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 podman[145010]: 2026-03-10 09:57:40.09996095 +0000 UTC m=+0.051497672 container init 12681fec7ba81e6e10765beab3775835e238844bc35691541edbbf1d71b69147 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 podman[145010]: 2026-03-10 09:57:40.1062771 +0000 UTC m=+0.057813813 container start 12681fec7ba81e6e10765beab3775835e238844bc35691541edbbf1d71b69147 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, org.label-schema.build-date=20260223, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0) 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 bash[145010]: 12681fec7ba81e6e10765beab3775835e238844bc35691541edbbf1d71b69147 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 podman[145010]: 2026-03-10 09:57:40.06175735 +0000 UTC m=+0.013294072 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:57:40.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 systemd[1]: Started Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
2026-03-10T09:57:40.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 6 pgs inactive, 6 pgs peering (PG_AVAILABILITY)
2026-03-10T09:57:40.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:40.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:40.704 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 6 pgs inactive, 6 pgs peering (PG_AVAILABILITY)
2026-03-10T09:57:40.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:40.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:40.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:40 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.708 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:57:40] "GET /metrics HTTP/1.1" 200 38195 "" "Prometheus/2.51.0"
2026-03-10T09:57:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:40 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 6 pgs inactive, 6 pgs peering (PG_AVAILABILITY)
2026-03-10T09:57:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:40 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.119 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:57:40.702+0000 7f17db122740 -1 Falling back to public interface
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: pgmap v224: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: pgmap v225: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 865 B/s rd, 0 op/s
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: pgmap v226: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.495 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: pgmap v224: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: pgmap v225: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 865 B/s rd, 0 op/s 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: pgmap v226: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:41 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.496 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:57:41.200+0000 7f17db122740 -1 osd.2 0 read_superblock omap replica is missing.
2026-03-10T09:57:41.496 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:41 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:57:41.287+0000 7f17db122740 -1 osd.2 116 log_to_monitors true 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: pgmap v224: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: pgmap v225: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 865 B/s rd, 0 op/s 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: pgmap v226: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:41.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:41 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v227: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 1 op/s
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v228: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v229: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v230: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v231: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v232: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v233: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v234: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v235: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: pgmap v236: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded (PG_DEGRADED)
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: osdmap e119: 8 total, 7 up, 8 in
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
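
Note: the four-command cycle that dominates the audit trail above (config dump, versions, config generate-minimal-conf, auth get) appears to be the periodic refresh the cephadm mgr module performs on each serve pass while reconciling hosts. The same data can be pulled by hand with the standard ceph CLI; a minimal sketch, run from any node with an admin keyring:

    ceph config dump --format json       # full cluster configuration, as mgr.y requests it
    ceph versions                        # per-daemon version census
    ceph config generate-minimal-conf    # the minimal ceph.conf cephadm distributes to hosts
    ceph auth get client.admin           # the admin keyring copied alongside that conf
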
2026-03-10T09:57:42.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v227: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 1 op/s
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v228: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v229: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v230: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v231: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v232: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v233: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v234: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v235: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: pgmap v236: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded (PG_DEGRADED)
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: osdmap e119: 8 total, 7 up, 8 in
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:42 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.621 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:57:42 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:57:42.293+0000 7f17d2ecd640 -1 osd.2 116 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v227: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 1 op/s
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v228: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v229: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v230: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
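
Note: the lone osd.2 stderr line above (set_numa_affinity unable to identify public interface '' ... No such file or directory) is the OSD failing to map its public interface, which is unset here, onto a NUMA node; on single-socket VMs like these it is cosmetic. If it ever needed silencing, the relevant knob could be inspected or turned off; a sketch, assuming the stock option name:

    ceph config get osd osd_numa_auto_affinity          # whether OSDs try to pin to the NIC's NUMA node
    ceph config set osd osd_numa_auto_affinity false    # skip the lookup on hosts without usable NUMA info
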
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v231: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v232: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v233: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v234: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v235: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: pgmap v236: 161 pgs: 32 peering, 4 stale+active+clean, 125 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded (PG_DEGRADED)
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: osdmap e119: 8 total, 7 up, 8 in
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:42.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:42 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.741 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v237: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.741 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v238: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.741 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.741 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v239: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.741 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v240: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v241: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v242: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v243: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v244: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
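
Note: each monitor records the same osd.2 boot sequence: "osd crush set-device-class" finishes and "osd crush create-or-move" re-registers the OSD at weight 0.0195 under host=vm00/root=default. These mon commands map one-to-one onto the CLI, so the equivalent manual invocation would be (a sketch using the values visible in the log):

    ceph osd crush set-device-class hdd osd.2                            # tag the OSD with its device class
    ceph osd crush create-or-move osd.2 0.0195 host=vm00 root=default    # (re)place it in the CRUSH hierarchy
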
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v246: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v247: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v248: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v249: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v250: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: pgmap v251: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935] boot
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: osdmap e120: 8 total, 8 up, 8 in
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.742 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v237: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v238: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v239: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v240: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v241: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v242: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v243: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v244: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v246: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v247: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v248: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v249: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
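
Note: this is the recovery turning over: OSD_DOWN clears, osd.2 boots into osdmap e120 (8 up, 8 in), while the pgmaps still show 27/627 objects degraded across undersized PGs until backfill catches up. The same transition can be watched interactively on a live cluster; a sketch with standard commands:

    ceph health detail                                       # lists PG_DEGRADED and the affected PGs
    ceph osd tree up                                         # confirms all eight OSDs are up after the boot
    ceph pg dump pgs_brief | grep -E 'undersized|degraded'   # the PGs still being repaired
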
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v250: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: pgmap v251: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935] boot
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: osdmap e120: 8 total, 8 up, 8 in
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:43.743 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:43 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v237: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v238: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v239: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v240: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%)
2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v241: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v242: 161 pgs: 6 active+undersized, 32 peering, 2 stale+active+clean, 2 active+undersized+degraded, 119 active+clean; 457 KiB data, 148 MiB used, 160 GiB / 160 GiB avail; 7/627 objects degraded (1.116%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v243: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v244: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v246: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v247: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 149 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v248: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v249: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v250: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: pgmap v251: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: osd.2 [v2:192.168.123.100:6818/3125648935,v1:192.168.123.100:6819/3125648935] boot 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 
vm03 ceph-mon[123760]: osdmap e120: 8 total, 8 up, 8 in 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:43.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:43.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:43 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v252: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v253: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v254: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v255: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v256: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v257: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v259: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.3 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v260: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: pgmap v261: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: osdmap e121: 8 total, 8 up, 8 in 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v252: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v253: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v254: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v255: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v256: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v257: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v259: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.3 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v260: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: pgmap v261: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: osdmap e121: 8 total, 8 up, 8 in 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v252: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v253: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v254: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v255: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v256: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v257: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v259: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.3 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v260: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: pgmap v261: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 7.5 KiB/s rd, 7 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: osdmap e121: 8 total, 8 up, 8 in 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:44.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v262: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v263: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 
ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v264: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.8 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v265: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v266: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v267: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v268: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v269: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v270: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v271: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v273: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v274: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v275: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.527 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: pgmap v276: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T09:57:45.528 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v262: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v263: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v264: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.8 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v265: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v266: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v267: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v268: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v269: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%) 2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v270: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v271: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v273: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v274: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.528 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v275: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: pgmap v276: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.529 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:57:45.315+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (24 PGs are or would become offline)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v262: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v263: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v264: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 8.8 KiB/s rd, 8 op/s; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v265: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v266: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v267: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v268: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v269: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v270: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v271: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v273: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v274: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v275: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: pgmap v276: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:45.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: pgmap v277: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: pgmap v278: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: pgmap v279: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: pgmap v280: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (24 PGs are or would become offline)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: pgmap v277: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: pgmap v278: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: pgmap v279: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: pgmap v280: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (24 PGs are or would become offline)
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.542 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:46 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: pgmap v277: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: pgmap v278: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: pgmap v279: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: pgmap v280: 161 pgs: 10 active+undersized, 32 peering, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 27/627 objects degraded (4.306%)
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (24 PGs are or would become offline)
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:46.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:46 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:57:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:57:46.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:57:47.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:57:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:47.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:57:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:48.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:57:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:48 vm00 ceph-mon[118593]: pgmap v281: 161 pgs: 3 active+undersized, 32 peering, 1 active+undersized+degraded, 125 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 1/627 objects degraded (0.159%)
2026-03-10T09:57:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:48 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED)
2026-03-10T09:57:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:48 vm00 ceph-mon[121907]: pgmap v281: 161 pgs: 3 active+undersized, 32 peering, 1 active+undersized+degraded, 125 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 1/627 objects degraded (0.159%)
2026-03-10T09:57:48.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:48 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED)
2026-03-10T09:57:49.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:48 vm03 ceph-mon[123760]: pgmap v281: 161 pgs: 3 active+undersized, 32 peering, 1 active+undersized+degraded, 125 active+clean; 457 KiB data, 167 MiB used, 160 GiB / 160 GiB avail; 1/627 objects degraded (0.159%)
2026-03-10T09:57:49.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:48 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED)
2026-03-10T09:57:50.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:49 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 6 pgs inactive, 6 pgs peering)
2026-03-10T09:57:50.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:49 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded)
2026-03-10T09:57:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:49 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 6 pgs inactive, 6 pgs peering)
2026-03-10T09:57:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:49 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded)
2026-03-10T09:57:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:49 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 6 pgs inactive, 6 pgs peering)
2026-03-10T09:57:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:49 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded)
2026-03-10T09:57:50.926 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:57:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:57:50] "GET /metrics HTTP/1.1" 200 38210 "" "Prometheus/2.51.0"
2026-03-10T09:57:50.926 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:57:50.926 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:50 vm00 ceph-mon[121907]: pgmap v282: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:57:50.926 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:50 vm00 ceph-mon[118593]: pgmap v282: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:57:51.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:50 vm03 ceph-mon[123760]: pgmap v282: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 4s ago 11m - -
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 78s ago 11m - -
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (5m) 4s ago 9m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (5m) 78s ago 9m 47.7M - dad864ee21e9 011f2081bf92
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (3m) 4s ago 8m 52.7M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (2m) 78s ago 10m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (4m) 4s ago 11m 568M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (2m) 4s ago 11m 67.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (2m) 78s ago 11m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (2m) 4s ago 11m 48.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (5m) 4s ago 9m 9601k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (5m) 78s ago 9m 9647k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (67s) 4s ago 10m 70.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (31s) 4s ago 10m 44.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (11s) 4s ago 10m 42.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 12681fec7ba8
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (10m) 4s ago 10m 56.8M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (10m) 78s ago 10m 55.7M 4096M 17.2.0 e1d6a67b021e 76735d749d5c
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (10m) 78s ago 10m 53.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (9m) 78s ago 9m 51.1M 4096M 17.2.0 e1d6a67b021e d485462ed497
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (9m) 78s ago 9m 52.8M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (3m) 78s ago 9m 44.0M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (9m) 4s ago 9m 94.5M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:57:51.351 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (9m) 78s ago 9m 95.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:57:51.352 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (8m) 4s ago 8m 95.5M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:57:51.352 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (8m) 78s ago 8m 92.5M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5,
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 9,
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:57:51.607 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:57:51.830 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "mon",
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "mgr"
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "8/23 daemons upgraded",
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons",
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:57:51.831 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:57:51.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:51 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/1823621857' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:51.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:51 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/1823621857' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:52.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:51 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/1823621857' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:57:52.087 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:57:52.087 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:57:52.087 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
2026-03-10T09:57:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:52 vm03 ceph-mon[123760]: from='client.34257 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:52 vm03 ceph-mon[123760]: pgmap v283: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 826 B/s rd, 0 op/s
2026-03-10T09:57:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:52 vm03 ceph-mon[123760]: from='client.44227 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:52 vm03 ceph-mon[123760]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:52 vm03 ceph-mon[123760]: from='client.34275 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:52 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/588135090' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[118593]: from='client.34257 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[118593]: pgmap v283: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 826 B/s rd, 0 op/s
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[118593]: from='client.44227 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[118593]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[118593]: from='client.34275 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/588135090' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[121907]: from='client.34257 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[121907]: pgmap v283: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 826 B/s rd, 0 op/s
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[121907]: from='client.44227 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[121907]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[121907]: from='client.34275 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:57:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:52 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/588135090' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:57:53.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:53 vm03 ceph-mon[123760]: pgmap v284: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 624 B/s rd, 0 op/s
2026-03-10T09:57:53.951 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:53 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:57:54.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:53 vm00 ceph-mon[118593]: pgmap v284: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 624 B/s rd, 0 op/s
2026-03-10T09:57:54.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:53 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:57:54.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:53 vm00 ceph-mon[121907]: pgmap v284: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 624 B/s rd, 0 op/s
2026-03-10T09:57:54.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:53 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:57:56.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:56 vm03 ceph-mon[123760]: pgmap v285: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:57:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:56 vm00 ceph-mon[118593]: pgmap v285: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:57:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:56 vm00 ceph-mon[121907]: pgmap v285: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:57:57.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:57:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:57:56.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:57:57.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:57:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:57:56.952Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:57:58.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:58 vm03 ceph-mon[123760]: pgmap v286: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:57:58.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:57:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:58 vm00 ceph-mon[118593]: pgmap v286: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:57:58.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:57:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:57:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:58 vm00 ceph-mon[121907]: pgmap v286: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:57:58.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:57:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:00.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:00 vm03 ceph-mon[123760]: pgmap v287: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:58:00.552 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:00 vm00 ceph-mon[118593]: pgmap v287: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:58:00.552 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:00 vm00 ceph-mon[121907]: pgmap v287: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:58:00.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:58:00] "GET /metrics HTTP/1.1" 200 38210 "" "Prometheus/2.51.0"
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[121907]: Upgrade: osd.2 is safe to restart
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[118593]: Upgrade: osd.2 is safe to restart
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:58:01.433 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:01 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:01.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:58:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:01 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch
2026-03-10T09:58:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:01 vm03 ceph-mon[123760]: Upgrade: osd.2 is safe to restart
2026-03-10T09:58:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T09:58:01.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:01 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:01.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:01 vm00 systemd[1]: Stopping Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:58:01.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:01 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:58:01.549+0000 7f17d80b7640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:58:01.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:01 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:58:01.549+0000 7f17d80b7640 -1 osd.2 121 *** Got signal Terminated ***
2026-03-10T09:58:01.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:01 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[145020]: 2026-03-10T09:58:01.549+0000 7f17d80b7640 -1 osd.2 121 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[118593]: Upgrade: Updating osd.2
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[118593]: Deploying daemon osd.2 on vm00
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[118593]: pgmap v288: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[118593]: osd.2 marked itself down and dead
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[121907]: Upgrade: Updating osd.2
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[121907]: Deploying daemon osd.2 on vm00
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[121907]: pgmap v288: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[121907]: osd.2 marked itself down and dead
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:02.458 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:58:02.458 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147731]: 2026-03-10 09:58:02.247114976 +0000 UTC m=+0.711423651 container died 12681fec7ba81e6e10765beab3775835e238844bc35691541edbbf1d71b69147 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2)
2026-03-10T09:58:02.458 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147731]: 2026-03-10 09:58:02.274562818 +0000 UTC m=+0.738871491 container remove 12681fec7ba81e6e10765beab3775835e238844bc35691541edbbf1d71b69147 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
2026-03-10T09:58:02.458 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 bash[147731]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2
2026-03-10T09:58:02.458 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.422143492 +0000 UTC m=+0.020093908 container create 9c7fb1f5a29ef806a583f6b9fbceb747c97e7ef03b239f8570b4496105e8c287 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
2026-03-10T09:58:02.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:02 vm03 ceph-mon[123760]: Upgrade: Updating osd.2
2026-03-10T09:58:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:02 vm03 ceph-mon[123760]: Deploying daemon osd.2 on vm00
2026-03-10T09:58:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:02 vm03 ceph-mon[123760]: pgmap v288: 161 pgs: 161 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:58:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:02 vm03 ceph-mon[123760]: osd.2 marked itself down and dead
2026-03-10T09:58:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:02.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.473908124 +0000 UTC m=+0.071858538 container init 9c7fb1f5a29ef806a583f6b9fbceb747c97e7ef03b239f8570b4496105e8c287 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223) 2026-03-10T09:58:02.762 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.476881228 +0000 UTC m=+0.074831653 container start 9c7fb1f5a29ef806a583f6b9fbceb747c97e7ef03b239f8570b4496105e8c287 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-10T09:58:02.762 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.484161594 +0000 UTC m=+0.082112019 container attach 9c7fb1f5a29ef806a583f6b9fbceb747c97e7ef03b239f8570b4496105e8c287 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 2026-03-10T09:58:02.762 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.410580091 +0000 UTC m=+0.008530527 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:58:02.762 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.62578037 +0000 UTC m=+0.223730795 container died 9c7fb1f5a29ef806a583f6b9fbceb747c97e7ef03b239f8570b4496105e8c287 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-10T09:58:02.763 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147797]: 2026-03-10 09:58:02.658221787 +0000 UTC m=+0.256172212 container remove 9c7fb1f5a29ef806a583f6b9fbceb747c97e7ef03b239f8570b4496105e8c287 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T09:58:02.763 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service: Deactivated successfully. 2026-03-10T09:58:02.763 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 systemd[1]: Stopped Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:58:03.059 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 systemd[1]: Starting Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
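Note: the entries above capture cephadm's per-daemon upgrade sequence for osd.2 — the mgr logs "Upgrade: Updating osd.2", the OSD marks itself down and dead, the old container is removed, a short-lived osd-2-deactivate container runs, and systemd stops and then restarts the osd.2 unit on the new image. A minimal sketch of how an operator could follow the same rollout from the cluster side, using standard cephadm/Ceph commands (the systemd unit name below is taken from this log):

    # Overall upgrade progress and per-daemon image/version
    ceph orch upgrade status
    ceph orch ps --daemon-type osd
    ceph versions

    # On the host, each daemon is wrapped in a systemd unit named after the cluster FSID
    systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service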
2026-03-10T09:58:03.059 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:02 vm00 podman[147900]: 2026-03-10 09:58:02.978409186 +0000 UTC m=+0.028704052 container create 07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default) 2026-03-10T09:58:03.059 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 podman[147900]: 2026-03-10 09:58:03.017807643 +0000 UTC m=+0.068102509 container init 07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-10T09:58:03.059 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 podman[147900]: 2026-03-10 09:58:03.023250158 +0000 UTC m=+0.073545024 container start 07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2) 2026-03-10T09:58:03.059 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 podman[147900]: 2026-03-10 09:58:03.024583815 +0000 UTC m=+0.074878671 container attach 07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, 
org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-10T09:58:03.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:03 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:58:03.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:03 vm00 ceph-mon[118593]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T09:58:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:03 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:58:03.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:03 vm00 ceph-mon[121907]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T09:58:03.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 podman[147900]: 2026-03-10 09:58:02.960897633 +0000 UTC m=+0.011192509 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:58:03.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:03 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:58:03.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:03 vm03 ceph-mon[123760]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 
vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-10T09:58:03.991 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-10T09:58:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:04 vm00 ceph-mon[118593]: pgmap v290: 161 pgs: 12 stale+active+clean, 149 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:58:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:04 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:04 vm00 ceph-mon[118593]: osdmap e123: 8 total, 7 up, 8 in 2026-03-10T09:58:04.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:04 vm00 ceph-mon[121907]: pgmap v290: 161 pgs: 12 stale+active+clean, 149 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:58:04.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:04 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:04.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:04 vm00 ceph-mon[121907]: osdmap e123: 8 total, 7 up, 8 in 2026-03-10T09:58:04.369 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/ln -snf /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 /var/lib/ceph/osd/ceph-2/block 2026-03-10T09:58:04.369 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/ln -snf /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 /var/lib/ceph/osd/ceph-2/block 2026-03-10T09:58:04.369 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:03 vm00 bash[147900]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 bash[147900]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T09:58:04.370 
INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 bash[147900]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate[147911]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 bash[147900]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 conmon[147911]: conmon 07602fb7864b9f2037d8 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb.scope/container/memory.events 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 podman[147900]: 2026-03-10 09:58:04.025682238 +0000 UTC m=+1.075977104 container died 07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3) 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 podman[147900]: 2026-03-10 09:58:04.054828917 +0000 UTC m=+1.105123784 container remove 07602fb7864b9f2037d80f79babac3251da2134553eb28fa9cc433a757aab4eb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 podman[148151]: 2026-03-10 09:58:04.145250637 +0000 UTC m=+0.016471057 container create be8a08b99e6b10d02a7dfaea5b8b7bd492d136010c1defd27e79e8fccb30ff9e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS 
Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 podman[148151]: 2026-03-10 09:58:04.193712457 +0000 UTC m=+0.064932877 container init be8a08b99e6b10d02a7dfaea5b8b7bd492d136010c1defd27e79e8fccb30ff9e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 podman[148151]: 2026-03-10 09:58:04.196895744 +0000 UTC m=+0.068116164 container start be8a08b99e6b10d02a7dfaea5b8b7bd492d136010c1defd27e79e8fccb30ff9e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 bash[148151]: be8a08b99e6b10d02a7dfaea5b8b7bd492d136010c1defd27e79e8fccb30ff9e 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 podman[148151]: 2026-03-10 09:58:04.138444189 +0000 UTC m=+0.009664609 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:58:04.370 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 systemd[1]: Started Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
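Note: the "Failed to activate via raw: did not find any matching OSD to activate" message above is expected for an LVM-backed OSD — the activate container attempts raw-device activation first and then falls back to LVM, which succeeds ("ceph-volume lvm activate successful for osd ID: 2"). The transient OSD_DOWN health check and the stale/undersized/degraded PG states that follow are likewise the normal side effect of a rolling restart and should clear once osd.2 rejoins. A sketch of the equivalent manual activation, with the device path and OSD fsid copied from this log (--no-systemd limits ceph-volume to the preparation steps the container logged, without starting a unit):

    # Re-run the LVM activation for osd.2 by hand (values taken from this log)
    ceph-volume lvm activate --no-systemd 2 b8c6c746-4058-4653-b11b-b1cb8e4cd332

    # ...which boils down to roughly the commands the activate container ran:
    ceph-bluestore-tool --cluster=ceph prime-osd-dir \
        --dev /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 \
        --path /var/lib/ceph/osd/ceph-2 --no-mon-config
    ln -snf /dev/ceph-21f68641-3c78-4780-a86a-6531a8308dfe/osd-block-b8c6c746-4058-4653-b11b-b1cb8e4cd332 /var/lib/ceph/osd/ceph-2/block
    chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
    chown -R ceph:ceph /var/lib/ceph/osd/ceph-2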
2026-03-10T09:58:04.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:04 vm03 ceph-mon[123760]: pgmap v290: 161 pgs: 12 stale+active+clean, 149 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:58:04.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:04 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:04.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:04 vm03 ceph-mon[123760]: osdmap e123: 8 total, 7 up, 8 in 2026-03-10T09:58:05.091 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[148163]: 2026-03-10T09:58:04.787+0000 7f2848738740 -1 Falling back to public interface 2026-03-10T09:58:05.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v292: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v293: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 974 B/s rd, 0 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v294: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v295: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 2 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v296: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 3 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v297: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 3.9 KiB/s rd, 3 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v298: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v299: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: pgmap v300: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v292: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v293: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 974 B/s rd, 0 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v294: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v295: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 2 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v296: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 3 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v297: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 3.9 KiB/s rd, 3 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v298: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v299: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: pgmap v300: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.371 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.372 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:05 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v292: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v293: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 974 B/s rd, 0 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v294: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s; 31/627 objects degraded (4.944%) 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v295: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 2 op/s; 31/627 objects degraded (4.944%)
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v296: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 3 op/s; 31/627 objects degraded (4.944%)
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v297: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 3.9 KiB/s rd, 3 op/s; 31/627 objects degraded (4.944%)
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v298: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v299: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: pgmap v300: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:05.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:05 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:05.806 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:05 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[148163]: 2026-03-10T09:58:05.489+0000 7f2848738740 -1 osd.2 121 log_to_monitors true
2026-03-10T09:58:06.373 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v301: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v302: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v303: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v304: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v305: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v306: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v307: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v308: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v309: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v310: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 31/627 objects degraded (4.944%), 9 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v311: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v312: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v313: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: pgmap v314: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.374 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.375 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:06 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v301: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v302: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v303: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v304: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v305: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v306: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v307: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v308: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v309: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v310: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.468 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 31/627 objects degraded (4.944%), 9 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v311: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v312: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v313: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: pgmap v314: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.469 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v301: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v302: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v303: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v304: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v305: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v306: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v307: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v308: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v309: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v310: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 31/627 objects degraded (4.944%), 9 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v311: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v312: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v313: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-10T09:58:06.470 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: pgmap v314: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:06.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:06 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:06.729 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 09:58:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[148163]: 2026-03-10T09:58:06.487+0000 7f28404e3640 -1 osd.2 121 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:58:06.984 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:06.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:58:06.984 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:06.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:58:07.261 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:58:06.998+0000 7f8211476640 -1 mgr.server reply reply (11) Resource temporarily unavailable 1 pgs have unknown state; cannot draw any conclusions
2026-03-10T09:58:07.261 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:07 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:58:06.998+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy 1 pgs have unknown state; cannot draw any conclusionsunsafe to stop osd(s) at this time (14 PGs are or would become offline)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v315: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v316: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v317: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v318: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v319: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v320: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.549 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v321: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v322: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: osdmap e124: 8 total, 7 up, 8 in
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v324: 161 pgs: 3 unknown, 23 active+undersized, 1 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: pgmap v325: 161 pgs: 3 unknown, 23 active+undersized, 1 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.550 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch
2026-03-10T09:58:07.577 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v315: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v316: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v317: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v318: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v319: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v320: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v321: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v322: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%)
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: osdmap e124: 8 total, 7 up, 8 in 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v324: 161 pgs: 3 unknown, 23 active+undersized, 1 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: pgmap v325: 161 pgs: 3 unknown, 23 active+undersized, 1 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.578 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[118593]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v315: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v316: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v317: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v318: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v319: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v320: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v321: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v322: 161 pgs: 23 active+undersized, 4 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 168 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: osdmap e124: 8 total, 7 up, 8 in 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v324: 161 pgs: 3 unknown, 23 active+undersized, 1 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: pgmap v325: 161 pgs: 3 unknown, 23 active+undersized, 1 stale+active+clean, 9 active+undersized+degraded, 125 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:07.579 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:07 vm00 ceph-mon[121907]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: pgmap v326: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: pgmap v327: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: pgmap v328: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: pgmap v329: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: Upgrade: 1 pgs have unknown state; cannot draw any conclusions 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960] boot 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: osdmap e125: 8 total, 8 up, 8 in 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: pgmap v326: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.428 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: pgmap v327: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: pgmap v328: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: pgmap v329: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: Upgrade: 1 pgs have unknown state; cannot draw any conclusions 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960] boot 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: osdmap e125: 8 total, 8 up, 8 in 2026-03-10T09:58:08.429 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:58:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: pgmap v326: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: pgmap v327: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: pgmap v328: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: pgmap v329: 161 pgs: 1 unknown, 29 active+undersized, 1 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 169 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: Upgrade: 1 pgs have unknown state; cannot draw any conclusions 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: osd.2 [v2:192.168.123.100:6818/2005859960,v1:192.168.123.100:6819/2005859960] boot 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: osdmap e125: 8 total, 8 up, 8 in 2026-03-10T09:58:08.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: osdmap e126: 8 total, 8 up, 8 in 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: osdmap e126: 8 total, 8 up, 8 in 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: osdmap e126: 8 total, 8 up, 8 in 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: 
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:11.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:10 vm03 ceph-mon[123760]: pgmap v332: 161 pgs: 24 peering, 18 active+undersized, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 170 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 2 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:58:11.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:10 vm00 ceph-mon[118593]: pgmap v332: 161 pgs: 24 peering, 18 active+undersized, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 170 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 2 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:58:11.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:10 vm00 ceph-mon[121907]: pgmap v332: 161 pgs: 24 peering, 18 active+undersized, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 170 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 2 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:58:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:58:10] "GET /metrics HTTP/1.1" 200 38209 "" "Prometheus/2.51.0" 2026-03-10T09:58:12.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:11 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:12.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:11 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:12.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:11 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:13.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:12 vm03 ceph-mon[123760]: pgmap v333: 161 pgs: 24 peering, 18 active+undersized, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 170 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:58:13.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:12 vm00 ceph-mon[118593]: pgmap v333: 161 pgs: 24 peering, 18 active+undersized, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 170 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:58:13.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:12 vm00 ceph-mon[121907]: pgmap v333: 161 pgs: 24 peering, 18 active+undersized, 5 active+undersized+degraded, 114 active+clean; 457 KiB data, 170 MiB 
used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 27/627 objects degraded (4.306%) 2026-03-10T09:58:14.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:13 vm03 ceph-mon[123760]: pgmap v334: 161 pgs: 24 peering, 137 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:58:14.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:13 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:14.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:13 vm00 ceph-mon[118593]: pgmap v334: 161 pgs: 24 peering, 137 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:58:14.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:13 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:14.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:13 vm00 ceph-mon[121907]: pgmap v334: 161 pgs: 24 peering, 137 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T09:58:14.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:13 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:15.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:14 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded) 2026-03-10T09:58:15.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:14 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded) 2026-03-10T09:58:15.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:14 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 27/627 objects degraded (4.306%), 5 pgs degraded) 2026-03-10T09:58:16.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:15 vm03 ceph-mon[123760]: pgmap v335: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:58:16.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:15 vm00 ceph-mon[118593]: pgmap v335: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:58:16.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:15 vm00 ceph-mon[121907]: pgmap v335: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:58:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:16.952Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:16.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt 
failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:18.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:17 vm03 ceph-mon[123760]: pgmap v336: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:58:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:58:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:17 vm00 ceph-mon[118593]: pgmap v336: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:58:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:58:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:17 vm00 ceph-mon[121907]: pgmap v336: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T09:58:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:58:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:20.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:19 vm03 ceph-mon[123760]: pgmap v337: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 989 B/s rd, 0 op/s 2026-03-10T09:58:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:19 vm00 ceph-mon[118593]: pgmap v337: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 989 B/s rd, 0 op/s 2026-03-10T09:58:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:19 vm00 ceph-mon[121907]: pgmap v337: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 989 B/s rd, 0 op/s 2026-03-10T09:58:21.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:58:20] "GET /metrics HTTP/1.1" 200 38209 "" "Prometheus/2.51.0" 2026-03-10T09:58:22.230 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:21 vm00 ceph-mon[121907]: pgmap v338: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:58:22.230 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:21 vm00 ceph-mon[118593]: pgmap v338: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:58:22.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:21 vm03 ceph-mon[123760]: pgmap v338: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T09:58:22.404 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:58:22.888 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T09:58:22.888 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 13s ago 11m - - 2026-03-10T09:58:22.888 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 109s ago 11m - - 2026-03-10T09:58:22.888 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (6m) 13s ago 10m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (5m) 109s ago 9m 47.7M - dad864ee21e9 011f2081bf92 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (4m) 13s ago 9m 52.8M - 3.5 e1d6a67b021e a19997a050c6 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (3m) 109s ago 11m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (5m) 13s ago 12m 570M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (3m) 13s ago 12m 70.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (2m) 109s ago 11m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (2m) 13s ago 11m 50.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (5m) 13s ago 9m 9613k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (5m) 109s ago 9m 9647k - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (98s) 13s ago 11m 71.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (63s) 13s ago 11m 44.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (18s) 13s ago 11m 42.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (10m) 13s ago 10m 57.5M 4096M 17.2.0 e1d6a67b021e 3c59893e3c18 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (10m) 109s ago 10m 55.7M 4096M 17.2.0 e1d6a67b021e 76735d749d5c 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (10m) 109s ago 10m 53.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (10m) 109s ago 10m 51.1M 4096M 17.2.0 e1d6a67b021e d485462ed497 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (10m) 109s ago 10m 52.8M 4096M 17.2.0 e1d6a67b021e 9271ca589720 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (4m) 109s ago 9m 44.0M - 2.51.0 1d3b7f56885b fbf1a95f0e55 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (9m) 13s ago 9m 
94.5M - 17.2.0 e1d6a67b021e a1037186db7f 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (9m) 109s ago 9m 95.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (9m) 13s ago 9m 95.6M - 17.2.0 e1d6a67b021e 3ac258c6b805 2026-03-10T09:58:22.889 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (9m) 109s ago 9m 92.5M - 17.2.0 e1d6a67b021e 5cd6eb4d6619 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: Upgrade: osd.3 is safe to restart 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: Upgrade: Updating osd.3 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='client.44260 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: Deploying daemon osd.3 on vm00 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[118593]: from='client.44266 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:23.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: Upgrade: osd.3 is safe to restart 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: Upgrade: Updating osd.3 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='client.44260 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: Deploying daemon osd.3 on vm00 2026-03-10T09:58:23.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:22 vm00 ceph-mon[121907]: from='client.44266 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 9, 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:58:23.240 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:58:23.297 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: Upgrade: osd.3 is safe to restart 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: Upgrade: Updating osd.3 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='client.44260 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: Deploying daemon osd.3 on vm00 2026-03-10T09:58:23.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:22 vm03 ceph-mon[123760]: from='client.44266 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "mgr" 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "8/23 daemons upgraded", 2026-03-10T09:58:23.471 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T09:58:23.472 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T09:58:23.472 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:58:23.777 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:58:23.777 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:58:23.777 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 
2026-03-10T09:58:23.777 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:23 vm00 systemd[1]: Stopping Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: pgmap v339: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: from='client.44272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3709735312' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: from='client.34308 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/4234901535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[118593]: osd.3 marked itself down and dead
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: pgmap v339: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: from='client.44272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3709735312' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: from='client.34308 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:58:24.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/4234901535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:58:24.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:23 vm00 ceph-mon[121907]: osd.3 marked itself down and dead
2026-03-10T09:58:24.120 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[67650]: 2026-03-10T09:58:23.782+0000 7f71fcd52700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:58:24.120 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[67650]: 2026-03-10T09:58:23.782+0000 7f71fcd52700 -1 osd.3 126 *** Got signal Terminated ***
2026-03-10T09:58:24.120 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:23 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[67650]: 2026-03-10T09:58:23.782+0000 7f71fcd52700 -1 osd.3 126 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: pgmap v339: 161 pgs: 161 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: from='client.44272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3709735312' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: from='client.34308 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/4234901535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:58:24.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:23 vm03 ceph-mon[123760]: osd.3 marked itself down and dead
2026-03-10T09:58:24.721 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150483]: 2026-03-10 09:58:24.447525867 +0000 UTC m=+0.682987822 container died 3c59893e3c1800116d596cb5dc360ee09048f30079c2acd05dd326647e1c4b69 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , name=centos-stream, vendor=Red Hat, Inc., GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.display-name=CentOS Stream 8, architecture=x86_64, com.redhat.component=centos-stream-container, version=8, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, distribution-scope=public, ceph=True, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, io.openshift.expose-services=, GIT_BRANCH=HEAD)
2026-03-10T09:58:24.722 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150483]: 2026-03-10 09:58:24.477141975 +0000 UTC m=+0.712603930 container remove 3c59893e3c1800116d596cb5dc360ee09048f30079c2acd05dd326647e1c4b69 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, version=8, CEPH_POINT_RELEASE=-17.2.0, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, distribution-scope=public, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vendor=Red Hat, Inc., name=centos-stream, RELEASE=HEAD, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, io.k8s.display-name=CentOS Stream 8)
2026-03-10T09:58:24.722 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 bash[150483]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3
2026-03-10T09:58:24.722 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150552]: 2026-03-10 09:58:24.62979814 +0000 UTC m=+0.017258872 container create 70811ea2f9443f8e145f7f9cf8c490af0b53de09283a7cb6c381176864aa5b7b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T09:58:24.722 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150552]: 2026-03-10 09:58:24.674672255 +0000 UTC m=+0.062132987 container init 70811ea2f9443f8e145f7f9cf8c490af0b53de09283a7cb6c381176864aa5b7b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team )
2026-03-10T09:58:24.722 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150552]: 2026-03-10 09:58:24.677890799 +0000 UTC m=+0.065351520 container start 70811ea2f9443f8e145f7f9cf8c490af0b53de09283a7cb6c381176864aa5b7b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:58:24.722 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150552]: 2026-03-10 09:58:24.678786635 +0000 UTC m=+0.066247367 container attach 70811ea2f9443f8e145f7f9cf8c490af0b53de09283a7cb6c381176864aa5b7b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid)
2026-03-10T09:58:24.975 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:24 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:58:24.975 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:24 vm00 ceph-mon[118593]: osdmap e127: 8 total, 7 up, 8 in
2026-03-10T09:58:24.975 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:24 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:58:24.975 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:24 vm00 ceph-mon[121907]: osdmap e127: 8 total, 7 up, 8 in
2026-03-10T09:58:24.975 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150552]: 2026-03-10 09:58:24.622802106 +0000 UTC m=+0.010262849 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:58:24.975 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150571]: 2026-03-10 09:58:24.843746748 +0000 UTC m=+0.011484896 container died 70811ea2f9443f8e145f7f9cf8c490af0b53de09283a7cb6c381176864aa5b7b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:58:24.975 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 podman[150571]: 2026-03-10 09:58:24.875300291 +0000 UTC m=+0.043038430 container remove 70811ea2f9443f8e145f7f9cf8c490af0b53de09283a7cb6c381176864aa5b7b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:58:24.975 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service: Deactivated successfully.
2026-03-10T09:58:24.975 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 systemd[1]: Stopped Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:58:24.975 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:24 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service: Consumed 22.688s CPU time.
2026-03-10T09:58:25.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:24 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:58:25.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:24 vm03 ceph-mon[123760]: osdmap e127: 8 total, 7 up, 8 in
2026-03-10T09:58:25.228 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 systemd[1]: Starting Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:58:25.228 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 podman[150657]: 2026-03-10 09:58:25.204994994 +0000 UTC m=+0.017688827 container create 0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0)
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 podman[150657]: 2026-03-10 09:58:25.257760438 +0000 UTC m=+0.070454281 container init 0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 podman[150657]: 2026-03-10 09:58:25.260559336 +0000 UTC m=+0.073253169 container start 0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid)
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 podman[150657]: 2026-03-10 09:58:25.261407012 +0000 UTC m=+0.074100845 container attach 0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 podman[150657]: 2026-03-10 09:58:25.197525906 +0000 UTC m=+0.010219748 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:25.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
2026-03-10T09:58:26.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 --path /var/lib/ceph/osd/ceph-3 --no-mon-config
2026-03-10T09:58:26.120 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:25 vm00 bash[150657]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 --path /var/lib/ceph/osd/ceph-3 --no-mon-config
2026-03-10T09:58:26.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:26 vm00 ceph-mon[118593]: pgmap v341: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:58:26.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:26 vm00 ceph-mon[118593]: osdmap e128: 8 total, 7 up, 8 in
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/ln -snf /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 /var/lib/ceph/osd/ceph-3/block
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 bash[150657]: Running command: /usr/bin/ln -snf /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 /var/lib/ceph/osd/ceph-3/block
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 bash[150657]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 bash[150657]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 bash[150657]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
2026-03-10T09:58:26.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[150668]: --> ceph-volume lvm activate successful for osd ID: 3
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 bash[150657]: --> ceph-volume lvm activate successful for osd ID: 3
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 conmon[150668]: conmon 0e08842cb6e338cf24ac : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530.scope/container/memory.events
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 podman[150657]: 2026-03-10 09:58:26.208913656 +0000 UTC m=+1.021607499 container died 0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 podman[150657]: 2026-03-10 09:58:26.241173332 +0000 UTC m=+1.053867165 container remove 0e08842cb6e338cf24ac7b38d0b42f90ae0e177fae823f5a95bead8f30d5c530 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 podman[150909]: 2026-03-10 09:58:26.3446854 +0000 UTC m=+0.017254753 container create 3d456c11a709e415ef28e73290c112e908e3da218d86567c0935cf43345e0893 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 podman[150909]: 2026-03-10 09:58:26.372803405 +0000 UTC m=+0.045372768 container init 3d456c11a709e415ef28e73290c112e908e3da218d86567c0935cf43345e0893 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 podman[150909]: 2026-03-10 09:58:26.380151146 +0000 UTC m=+0.052720499 container start 3d456c11a709e415ef28e73290c112e908e3da218d86567c0935cf43345e0893 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 bash[150909]: 3d456c11a709e415ef28e73290c112e908e3da218d86567c0935cf43345e0893
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 podman[150909]: 2026-03-10 09:58:26.337803661 +0000 UTC m=+0.010373023 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:58:26.620 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 systemd[1]: Started Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
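[Editor's sketch] The sequence just logged is cephadm's per-daemon redeploy during an upgrade: systemd stops the templated unit, a short-lived "-deactivate" container runs under the new image, an "-activate" container re-runs ceph-volume lvm activate (prime the OSD dir, re-link the block device, fix ownership), and the long-running osd.3 container then starts on the target image. The naming patterns are taken directly from the lines above; the helper below is purely illustrative (a hypothetical reconstruction, not cephadm's actual code):

FSID = "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"

def unit_name(fsid: str, daemon: str) -> str:
    # systemd template instance, as in "Stopping Ceph osd.3 for <fsid>..."
    return f"ceph-{fsid}@{daemon}.service"

def container_name(fsid: str, daemon: str, phase: str = "") -> str:
    # podman names seen above: ceph-<fsid>-osd-3[-deactivate|-activate]
    base = f"ceph-{fsid}-{daemon.replace('.', '-')}"
    return f"{base}-{phase}" if phase else base

# Matches the names that appear verbatim in the journal entries above.
assert unit_name(FSID, "osd.3") == "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service"
assert container_name(FSID, "osd.3", "activate") == "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate"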
2026-03-10T09:58:26.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:26 vm00 ceph-mon[121907]: pgmap v341: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:58:26.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:26 vm00 ceph-mon[121907]: osdmap e128: 8 total, 7 up, 8 in 2026-03-10T09:58:26.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:26 vm03 ceph-mon[123760]: pgmap v341: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T09:58:26.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:26 vm03 ceph-mon[123760]: osdmap e128: 8 total, 7 up, 8 in 2026-03-10T09:58:27.119 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:26.734+0000 7f464fa03740 -1 Falling back to public interface 2026-03-10T09:58:27.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:26.954Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:27.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:26.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:27.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: pgmap v343: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 674 B/s rd, 0 op/s 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: pgmap v344: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 915 B/s rd, 0 op/s 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: pgmap v345: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: pgmap v346: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: pgmap v347: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: pgmap v348: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.725 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: pgmap v343: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 674 B/s rd, 0 op/s 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: pgmap v344: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 915 B/s rd, 0 op/s 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: pgmap v345: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: pgmap v346: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: pgmap v347: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: pgmap v348: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.726 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:27.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.727 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:27.384+0000 7f464fa03740 -1 osd.3 0 read_superblock omap replica is missing.
2026-03-10T09:58:27.727 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:27 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:27.457+0000 7f464fa03740 -1 osd.3 126 log_to_monitors true
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: pgmap v343: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 674 B/s rd, 0 op/s
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: pgmap v344: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 915 B/s rd, 0 op/s
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: pgmap v345: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: pgmap v346: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: pgmap v347: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: pgmap v348: 161 pgs: 7 active+undersized, 25 stale+active+clean, 129 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:27.800 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v349: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v350: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v351: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v352: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v353: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v354: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v355: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v356: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v357: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v358: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v359: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v360: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v361: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v362: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v363: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 40/627 objects degraded (6.380%), 11 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v364: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v365: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
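The degraded ratios the mons report are internally consistent and can be checked by hand: 23 of 627 object instances is 3.668% and 40 of 627 is 6.380%, exactly the figures in the pgmap and health lines above. A one-line verification, with the counts taken from this log:

```python
# Object counts taken from the pgmap/health lines above.
for degraded, total in [(23, 627), (40, 627)]:
    print(f"{degraded}/{total} objects degraded ({100 * degraded / total:.3f}%)")
# 23/627 objects degraded (3.668%)
# 40/627 objects degraded (6.380%)
```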
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v366: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v367: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v368: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: pgmap v369: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.621 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v349: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v350: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v351: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v352: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v353: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v354: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v355: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v356: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v357: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v358: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v359: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v360: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v361: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v362: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v363: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 40/627 objects degraded (6.380%), 11 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v364: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v365: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
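The pgmap lines carry their own consistency check as well: the per-state counts always sum to the 161 PGs reported up front (for v357 onward, 23 + 14 + 11 + 113 = 161). Below is a small parser for the "pgmap vN: M pgs: states; ..." layout shown above; the function name and return shape are assumptions of this sketch.

```python
import re

PGMAP = re.compile(r"pgmap v(?P<ver>\d+): (?P<total>\d+) pgs: (?P<states>[^;]+);")

def check_pgmap(message):
    """Parse a pgmap summary and verify the state counts sum to the total."""
    m = PGMAP.search(message)
    if m is None:
        return None
    states = {}
    for part in m.group("states").split(", "):
        count, state = part.split(" ", 1)
        states[state] = int(count)
    assert sum(states.values()) == int(m.group("total"))
    return int(m.group("ver")), states

# Example taken from the log above:
check_pgmap("pgmap v357: 161 pgs: 23 active+undersized, 14 stale+active+clean, "
            "11 active+undersized+degraded, 113 active+clean; 457 KiB data, ...")
```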
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.622 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v366: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v367: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v368: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: pgmap v369: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.623 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v349: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v350: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v351: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v352: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v353: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v354: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v355: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v356: 161 pgs: 15 active+undersized, 19 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 23/627 objects degraded (3.668%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v357: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v358: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v359: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v360: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v361: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v362: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v363: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 40/627 objects degraded (6.380%), 11 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v364: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v365: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v366: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v367: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v368: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: pgmap v369: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%)
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:28.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: pgmap v370: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: pgmap v371: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: pgmap v372: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: pgmap v373: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: pgmap v374: 161 pgs: 33 active+undersized, 2 stale+active+clean, 17 active+undersized+degraded, 109 active+clean; 457 KiB data, 175 MiB used, 160 GiB / 160 GiB avail; 57/627 objects degraded (9.091%) 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: osdmap e129: 8 total, 7 up, 8 in 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.769 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd 
ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: pgmap v370: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: pgmap v371: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: pgmap v372: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: pgmap v373: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: pgmap v374: 161 pgs: 33 active+undersized, 2 stale+active+clean, 17 active+undersized+degraded, 109 active+clean; 457 KiB data, 175 MiB used, 160 GiB / 160 GiB avail; 57/627 objects degraded (9.091%) 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: osdmap e129: 8 total, 7 up, 8 in 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 
ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:29.770 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:58:29.401+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (1 PGs are or would become offline) 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: pgmap v370: 161 pgs: 23 
active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: pgmap v371: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: pgmap v372: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: pgmap v373: 161 pgs: 23 active+undersized, 14 stale+active+clean, 11 active+undersized+degraded, 113 active+clean; 457 KiB data, 174 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: pgmap v374: 161 pgs: 33 active+undersized, 2 stale+active+clean, 17 active+undersized+degraded, 109 active+clean; 457 KiB data, 175 MiB used, 160 GiB / 160 GiB avail; 57/627 objects degraded (9.091%) 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: osdmap e129: 8 total, 7 up, 8 in 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 
ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:29.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:30.092 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:29 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:29.767+0000 7f4646fad640 -1 osd.3 126 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:58:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v376: 161 pgs: 1 peering, 41 
active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v377: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v378: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v379: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v380: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v381: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v382: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: pgmap v383: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (1 PGs are or would become offline) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[118593]: OSD bench result of 13726.806444 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v376: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v377: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v378: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v379: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v380: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v381: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v382: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: pgmap v383: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (1 PGs are or would become offline) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T09:58:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:30 vm00 ceph-mon[121907]: OSD bench result of 13726.806444 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v376: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v377: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v378: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v379: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v380: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v381: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v382: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: pgmap v383: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (1 PGs are or would become offline) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T09:58:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:30 vm03 ceph-mon[123760]: OSD bench result of 13726.806444 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T09:58:30.992 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:58:30] "GET /metrics HTTP/1.1" 200 38209 "" "Prometheus/2.51.0" 2026-03-10T09:58:31.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329] boot 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: osdmap e130: 8 total, 8 up, 8 in 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329] boot 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: osdmap e130: 8 total, 8 up, 8 in 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[121907]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: osd.3 [v2:192.168.123.100:6826/2132938329,v1:192.168.123.100:6827/2132938329] boot
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: osdmap e130: 8 total, 8 up, 8 in
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:31.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:32.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:32 vm03 ceph-mon[123760]: pgmap v385: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%)
2026-03-10T09:58:32.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:32 vm03 ceph-mon[123760]: osdmap e131: 8 total, 8 up, 8 in
2026-03-10T09:58:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:32 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:32.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:32 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[118593]: pgmap v385: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%)
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[118593]: osdmap e131: 8 total, 8 up, 8 in
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[121907]: pgmap v385: 161 pgs: 1 peering, 41 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 196 MiB used, 160 GiB / 160 GiB avail; 78/627 objects degraded (12.440%)
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[121907]: osdmap e131: 8 total, 8 up, 8 in
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:32.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:58:33.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:33 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 54/627 objects degraded (8.612%), 17 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:33.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:33 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 54/627 objects degraded (8.612%), 17 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:34.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:33 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 54/627 objects degraded (8.612%), 17 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:34.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:34 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:58:34.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:34 vm00 ceph-mon[118593]: pgmap v387: 161 pgs: 6 peering, 27 active+undersized, 17 active+undersized+degraded, 111 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:58:34.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:34 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:58:34.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:34 vm00 ceph-mon[121907]: pgmap v387: 161 pgs: 6 peering, 27 active+undersized, 17 active+undersized+degraded, 111 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:58:35.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:34 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:58:35.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:34 vm03 ceph-mon[123760]: pgmap v387: 161 pgs: 6 peering, 27 active+undersized, 17 active+undersized+degraded, 111 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:58:36.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:35 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T09:58:36.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:35 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 54/627 objects degraded (8.612%), 17 pgs degraded)
2026-03-10T09:58:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:35 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T09:58:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:35 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 54/627 objects degraded (8.612%), 17 pgs degraded)
2026-03-10T09:58:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:35 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T09:58:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:35 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 54/627 objects degraded (8.612%), 17 pgs degraded)
2026-03-10T09:58:36.954 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:36 vm00 ceph-mon[118593]: pgmap v388: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:36.954 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:36 vm00 ceph-mon[121907]: pgmap v388: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:37.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:36 vm03 ceph-mon[123760]: pgmap v388: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:37.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:36.954Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:58:37.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:36.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:58:39.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:38 vm03 ceph-mon[123760]: pgmap v389: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:39.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:38 vm00 ceph-mon[118593]: pgmap v389: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:39.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:38 vm00 ceph-mon[121907]: pgmap v389: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:58:41.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:40 vm03 ceph-mon[123760]: pgmap v390: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 585 B/s rd, 0 op/s
2026-03-10T09:58:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:40 vm00 ceph-mon[118593]: pgmap v390: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 585 B/s rd, 0 op/s
2026-03-10T09:58:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:40 vm00 ceph-mon[121907]: pgmap v390: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 585 B/s rd, 0 op/s
2026-03-10T09:58:41.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:58:40] "GET /metrics HTTP/1.1" 200 38223 "" "Prometheus/2.51.0"
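The pgmap records above carry the degraded-object accounting for this window: each line reports a raw ratio and a rounded percentage, e.g. 78/627 objects degraded (12.440%) and, after partial recovery, 54/627 (8.612%). A minimal Python sketch (not part of the teuthology harness; the function name is made up for illustration) that re-derives the percentage from such a record and confirms the mon's three-decimal rounding:

import re

# Sketch only: re-derive the "(N%)" figure from a pgmap degraded report,
# e.g. "78/627 objects degraded (12.440%)" -> 100 * 78 / 627 = 12.440%.
DEGRADED = re.compile(r"(\d+)/(\d+) objects degraded \(([\d.]+)%\)")

def check_degraded(record: str) -> None:
    m = DEGRADED.search(record)
    if m is None:
        return  # record carries no degraded-object accounting
    degraded, total, reported = int(m[1]), int(m[2]), float(m[3])
    derived = 100.0 * degraded / total
    # the mon prints three decimal places, so allow half a unit in the last place
    assert abs(derived - reported) <= 0.0005, (derived, reported)

check_degraded("78/627 objects degraded (12.440%)")  # 12.4402 -> 12.440
check_degraded("54/627 objects degraded (8.612%)")   # 8.6124  -> 8.612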
2026-03-10T09:58:43.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:42 vm03 ceph-mon[123760]: pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T09:58:43.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:42 vm00 ceph-mon[118593]: pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T09:58:43.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:42 vm00 ceph-mon[121907]: pgmap v391: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T09:58:44.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:44 vm00 ceph-mon[118593]: pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 436 B/s rd, 0 op/s 2026-03-10T09:58:44.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:44 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:44.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:44 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:44.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:44 vm00 ceph-mon[121907]: pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 436 B/s rd, 0 op/s 2026-03-10T09:58:44.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:44 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:44.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:44 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:44.940 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:44 vm03 ceph-mon[123760]: pgmap v392: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 436 B/s rd, 0 op/s 2026-03-10T09:58:44.940 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:44 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:44.940 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:44 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:45.756 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[118593]: Upgrade: osd.3 is safe to restart 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[118593]: osd.3 marked itself down and dead 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[121907]: Upgrade: osd.3 is safe to restart 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:45.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:45 vm00 ceph-mon[121907]: osd.3 marked itself down and dead 2026-03-10T09:58:45.757 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 systemd[1]: Stopping Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:58:45.757 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:45.586+0000 7f464c998640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:58:45.757 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:45.586+0000 7f464c998640 -1 osd.3 131 *** Got signal Terminated *** 2026-03-10T09:58:45.757 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[150920]: 2026-03-10T09:58:45.586+0000 7f464c998640 -1 osd.3 131 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T09:58:45.846 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:45 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T09:58:45.847 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:45 vm03 ceph-mon[123760]: Upgrade: osd.3 is safe to restart 2026-03-10T09:58:45.847 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T09:58:45.847 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:45 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:45.847 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:45 vm03 ceph-mon[123760]: osd.3 marked itself down and dead 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 podman[153150]: 2026-03-10 09:58:45.757281755 +0000 UTC m=+0.187176910 container died 3d456c11a709e415ef28e73290c112e908e3da218d86567c0935cf43345e0893 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 podman[153150]: 2026-03-10 09:58:45.780145318 +0000 UTC m=+0.210040483 container remove 3d456c11a709e415ef28e73290c112e908e3da218d86567c0935cf43345e0893 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 bash[153150]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 podman[153220]: 2026-03-10 09:58:45.945014323 +0000 UTC m=+0.020244633 container create 72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 podman[153220]: 2026-03-10 09:58:45.984995085 +0000 UTC m=+0.060225395 container init 72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True) 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 podman[153220]: 2026-03-10 09:58:45.988765853 +0000 UTC m=+0.063996153 container start 72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:45 vm00 podman[153220]: 2026-03-10 09:58:45.989980807 +0000 UTC m=+0.065211117 container attach 72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:58:46.035 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153220]: 2026-03-10 09:58:45.936606477 +0000 UTC 
m=+0.011836797 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:58:46.356 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 conmon[153231]: conmon 72a0781dead407b0c882 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a.scope/container/memory.events 2026-03-10T09:58:46.356 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153220]: 2026-03-10 09:58:46.134945608 +0000 UTC m=+0.210175918 container died 72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3) 2026-03-10T09:58:46.357 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153220]: 2026-03-10 09:58:46.156748526 +0000 UTC m=+0.231978836 container remove 72a0781dead407b0c882ebbe1e6d632ff71f8f69b11bb9f2d001b146184a276a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2) 2026-03-10T09:58:46.357 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service: Deactivated successfully. 2026-03-10T09:58:46.357 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 systemd[1]: Stopped Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:58:46.357 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service: Consumed 1.224s CPU time. 2026-03-10T09:58:46.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 systemd[1]: Starting Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
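The osd.3 stop/start cycle above is cephadm's staggered-upgrade gate in action: the mgr dispatches "osd ok-to-stop" for the daemon (the dispatched command carries "max": 16 to batch candidates), the mons answer "Upgrade: osd.3 is safe to restart", the OSD marks itself down and dead, and systemd tears the container down before redeploying it on the new image. A hedged sketch of that gate through the public CLI (assuming a release whose ok-to-stop accepts --max, as the dispatched command shows; this is an illustration, not cephadm's actual code path):

import subprocess

def osd_ok_to_stop(osd_id: int, max_batch: int = 16) -> bool:
    """Ask the mons whether stopping this OSD would leave PGs unavailable.

    A zero exit status corresponds to the mon-log line
    "Upgrade: osd.N is safe to restart"; non-zero means the upgrade
    engine should defer this daemon and try again on a later pass.
    """
    proc = subprocess.run(
        ["ceph", "osd", "ok-to-stop", str(osd_id), "--max", str(max_batch)],
        capture_output=True,
        text=True,
    )
    return proc.returncode == 0

if osd_ok_to_stop(3):
    print("osd.3 is safe to restart")  # proceed with systemd stop + redeploy
else:
    print("deferring osd.3")           # retry on the next orchestrator pass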
2026-03-10T09:58:46.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153321]: 2026-03-10 09:58:46.489920875 +0000 UTC m=+0.023515767 container create d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-10T09:58:46.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153321]: 2026-03-10 09:58:46.531812853 +0000 UTC m=+0.065407756 container init d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:58:46.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153321]: 2026-03-10 09:58:46.53541683 +0000 UTC m=+0.069011722 container start d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2) 2026-03-10T09:58:46.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153321]: 2026-03-10 09:58:46.540003865 +0000 UTC m=+0.073598757 container attach d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, 
org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T09:58:46.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 podman[153321]: 2026-03-10 09:58:46.482976016 +0000 UTC m=+0.016570918 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[118593]: Upgrade: Updating osd.3 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[118593]: Deploying daemon osd.3 on vm00 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[118593]: pgmap v393: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[118593]: osdmap e132: 8 total, 7 up, 8 in 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[121907]: Upgrade: Updating osd.3 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[121907]: Deploying daemon osd.3 on vm00 2026-03-10T09:58:46.955 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[121907]: pgmap v393: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:58:46.956 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:58:46.956 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:46 vm00 ceph-mon[121907]: osdmap e132: 8 total, 7 up, 8 in 2026-03-10T09:58:46.956 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:46.954Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:46.956 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:46.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:46.956 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:46.956 
INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 bash[153321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:46.956 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:46.956 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:46 vm00 bash[153321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:47.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:46 vm03 ceph-mon[123760]: Upgrade: Updating osd.3 2026-03-10T09:58:47.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:46 vm03 ceph-mon[123760]: Deploying daemon osd.3 on vm00 2026-03-10T09:58:47.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:46 vm03 ceph-mon[123760]: pgmap v393: 161 pgs: 161 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T09:58:47.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:46 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:58:47.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:46 vm03 ceph-mon[123760]: osdmap e132: 8 total, 7 up, 8 in 2026-03-10T09:58:47.369 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:58:47.369 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:47.369 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:58:47.369 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:47.370 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:47.370 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:58:47.370 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T09:58:47.370 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T09:58:47.370 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-10T09:58:47.370 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-10T09:58:47.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: osdmap 
e133: 8 total, 7 up, 8 in 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/ln -snf /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 /var/lib/ceph/osd/ceph-3/block 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/ln -snf /dev/ceph-8d72d4d1-4a38-4b8a-86f6-7f8c95dfd8ac/osd-block-c20101af-d2e8-44bf-8684-17f240235e25 /var/lib/ceph/osd/ceph-3/block 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate[153332]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153321]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 conmon[153332]: conmon d19fb9403ac23efc2cae : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258.scope/container/memory.events 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 podman[153321]: 2026-03-10 09:58:47.455158439 +0000 UTC m=+0.988753342 container died d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, CEPH_REF=squid, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0) 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 podman[153321]: 2026-03-10 09:58:47.47350059 +0000 UTC m=+1.007095482 container 
remove d19fb9403ac23efc2caea287f75820d66a21d41554726a88f72bda179e824258 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-activate, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 podman[153570]: 2026-03-10 09:58:47.58924046 +0000 UTC m=+0.020436231 container create 5468a49ce587f13fa466505447f148d6ad98c8bf9f67d8bf9f7da07a7e82fe61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2) 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 podman[153570]: 2026-03-10 09:58:47.623488803 +0000 UTC m=+0.054684585 container init 5468a49ce587f13fa466505447f148d6ad98c8bf9f67d8bf9f7da07a7e82fe61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 podman[153570]: 2026-03-10 09:58:47.627200661 +0000 UTC m=+0.058396432 container start 5468a49ce587f13fa466505447f148d6ad98c8bf9f67d8bf9f7da07a7e82fe61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0) 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 bash[153570]: 5468a49ce587f13fa466505447f148d6ad98c8bf9f67d8bf9f7da07a7e82fe61 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 podman[153570]: 2026-03-10 09:58:47.579592603 +0000 UTC m=+0.010788385 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:58:47.744 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:47 vm00 systemd[1]: Started Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: osdmap e133: 8 total, 7 up, 8 in 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: pgmap v396: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 17/627 objects degraded (2.711%) 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: pgmap v396: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 17/627 objects degraded (2.711%) 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: osdmap e133: 8 total, 7 up, 8 in 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: pgmap v396: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 17/627 objects degraded (2.711%) 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:48.504 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:48 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[153580]: 2026-03-10T09:58:48.234+0000 7fd2a686b740 -1 Falling back to public interface 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v397: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 610 B/s rd, 0 op/s; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v398: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 854 B/s rd, 0 op/s; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v399: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v400: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v401: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v402: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v403: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 17/627 objects degraded (2.711%), 5 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v404: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v405: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v406: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v407: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%) 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:49.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v408: 161 pgs: 21 active+undersized, 13 peering, 9 stale+active+clean, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 53/627 objects degraded (8.453%) 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v409: 161 pgs: 21 active+undersized, 13 peering, 9 stale+active+clean, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 53/627 objects degraded (8.453%) 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v410: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: pgmap v411: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v397: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 610 B/s rd, 0 op/s; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v398: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 854 B/s rd, 0 op/s; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v399: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v400: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v401: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v402: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v403: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 17/627 objects degraded (2.711%), 5 pgs degraded (PG_DEGRADED)
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v404: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v405: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
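[Editor's note] The PG_DEGRADED health check above fires on the ratio that the pgmap summaries report: 17 of 627 objects, i.e. 100 * 17 / 627, which is about 2.711%. A minimal sketch that re-derives the percentage from such a line (a hypothetical helper for reading these logs, not part of this run's tooling):

    import re

    # Recompute the degraded percentage that ceph-mon prints in pgmap summaries.
    PGMAP_RE = re.compile(r"(\d+)/(\d+) objects degraded \(([\d.]+)%\)")

    def check_degraded(line):
        m = PGMAP_RE.search(line)
        if not m:
            return None
        degraded, total = int(m.group(1)), int(m.group(2))
        reported = float(m.group(3))
        computed = round(100 * degraded / total, 3)
        return degraded, total, reported, computed

    line = "pgmap v404: 161 pgs: ...; 17/627 objects degraded (2.711%)"
    print(check_degraded(line))  # (17, 627, 2.711, 2.711)

The same arithmetic accounts for the later jumps in this section: 53/627 gives 8.453% and 55/627 gives 8.772%.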
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v406: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v407: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v408: 161 pgs: 21 active+undersized, 13 peering, 9 stale+active+clean, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 53/627 objects degraded (8.453%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v409: 161 pgs: 21 active+undersized, 13 peering, 9 stale+active+clean, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 53/627 objects degraded (8.453%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v410: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: pgmap v411: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: pgmap v397: 161 pgs: 8 active+undersized, 13 peering, 22 stale+active+clean, 5 active+undersized+degraded, 113 active+clean; 457 KiB data, 197 MiB used, 160 GiB / 160 GiB avail; 610 B/s rd, 0 op/s; 17/627 objects degraded (2.711%)
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:49.121 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:49.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:49.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:49.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:49.122 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:49.622 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:49 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[153580]: 2026-03-10T09:58:49.291+0000 7fd2a686b740 -1 osd.3 131 log_to_monitors true
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v412: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v413: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v414: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v415: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v416: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v417: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.037 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: pgmap v418: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.038 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:49 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
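[Editor's note] The osd.3 command above is audited twice: once with the daemon's v2/v1 address and once as from='osd.3 ' with no address, plausibly the copy forwarded between monitors. When scanning for one-off events like this, the mgr's periodic config dump / versions / config generate-minimal-conf / auth get polling dominates the audit trail; a hypothetical post-processing sketch (not part of teuthology) that tallies commands per (entity, prefix) so rare pairs stand out:

    import json
    import re
    from collections import Counter

    # Count each audited mon command per (entity, prefix); the regex keys on
    # the "cmd=[...]: dispatch" audit format seen throughout this log.
    AUDIT_RE = re.compile(r"entity='([^']+)' cmd=(\[.*?\]): dispatch")

    def command_counts(lines):
        counts = Counter()
        for line in lines:
            m = AUDIT_RE.search(line)
            if m:
                prefix = json.loads(m.group(2))[0]["prefix"]
                counts[(m.group(1), prefix)] += 1
        return counts

    # Pairs with a low count (e.g. ('osd.3', 'osd crush set-device-class'))
    # are the interesting events; the mgr.y polling quartet dominates.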
2026-03-10T09:58:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v419: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v420: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v421: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v422: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v423: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v424: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v425: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
Skipping non agent specs 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: osdmap e134: 8 total, 7 up, 8 in 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v427: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
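The two osd.3 commands audited above are what an OSD effectively re-runs against the CRUSH map when it boots: tag itself with a device class, then (re)place itself under its host. A minimal sketch of the equivalent manual CLI, with the class, id, weight, and location taken from the log above:

    ceph osd crush set-device-class hdd osd.3                         # tag osd.3 with the hdd device class
    ceph osd crush create-or-move osd.3 0.0195 host=vm00 root=default # (re)place osd.3 under host vm00 in the default root
    ceph osd tree                                                     # verify the class and CRUSH location took effect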
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v428: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v429: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: pgmap v430: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:58:50] "GET /metrics HTTP/1.1" 200 38218 "" "Prometheus/2.51.0"
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v419: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v420: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v421: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v422: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v423: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v424: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v425: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: osdmap e134: 8 total, 7 up, 8 in
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v427: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v428: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v429: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: pgmap v430: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:50.872 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:50 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v419: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v420: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v421: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v422: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v423: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v424: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v425: 161 pgs: 27 active+undersized, 13 peering, 8 stale+active+clean, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: osdmap e134: 8 total, 7 up, 8 in
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v427: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v428: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v429: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: pgmap v430: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:50 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:51.835 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 09:58:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[153580]: 2026-03-10T09:58:51.394+0000 7fd29de15640 -1 osd.3 131 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:58:52.085 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.085 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.085 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.085 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.085 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v431: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
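The set_numa_affinity warning from osd.3 above is emitted when the OSD cannot map its public network interface to a NUMA node, which is common on virtualized NICs like these VPS targets and is generally harmless. A hedged sketch of one way to quiet it, assuming the osd_numa_auto_affinity option is available in this release:

    ceph config set osd osd_numa_auto_affinity false   # skip automatic NUMA pinning for OSDs
    ceph config get osd.3 osd_numa_auto_affinity       # confirm the override is visible to osd.3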
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v432: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v433: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v434: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v435: 161 pgs: 6 unknown, 34 active+undersized, 8 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v436: 161 pgs: 6 unknown, 34 active+undersized, 8 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v437: 161 pgs: 42 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 117 op/s; 78/627 objects degraded (12.440%)
2026-03-10T09:58:52.086 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[118593]: pgmap v438: 161 pgs: 42 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 91 KiB/s rd, 0 B/s wr, 152 op/s; 78/627 objects degraded (12.440%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v431: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v432: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v433: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v434: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v435: 161 pgs: 6 unknown, 34 active+undersized, 8 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v436: 161 pgs: 6 unknown, 34 active+undersized, 8 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.088 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v437: 161 pgs: 42 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 117 op/s; 78/627 objects degraded (12.440%)
2026-03-10T09:58:52.089 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:51 vm00 ceph-mon[121907]: pgmap v438: 161 pgs: 42 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 91 KiB/s rd, 0 B/s wr, 152 op/s; 78/627 objects degraded (12.440%)
2026-03-10T09:58:52.089 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:58:51 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:58:51.952+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline)
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v431: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v432: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v433: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v434: 161 pgs: 8 unknown, 27 active+undersized, 13 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v435: 161 pgs: 6 unknown, 34 active+undersized, 8 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v436: 161 pgs: 6 unknown, 34 active+undersized, 8 peering, 18 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%)
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: Metadata not up to date on all hosts.
Skipping non agent specs
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v437: 161 pgs: 42 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 117 op/s; 78/627 objects degraded (12.440%)
2026-03-10T09:58:52.299 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:51 vm03 ceph-mon[123760]: pgmap v438: 161 pgs: 42 active+undersized, 24 active+undersized+degraded, 95 active+clean; 457 KiB data, 198 MiB used, 160 GiB / 160 GiB avail; 91 KiB/s rd, 0 B/s wr, 152 op/s; 78/627 objects degraded (12.440%)
2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: OSD bench result of 14919.372503 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: Metadata not up to date on all hosts.
Skipping non agent specs
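The OSD bench message above means the startup self-benchmark of osd.3 (14919 IOPS) fell outside the accepted window, so mclock kept the default 315 IOPS. Following the log's own recommendation, a sketch of measuring capacity out-of-band with fio and pinning it; /dev/sdX is a placeholder for the OSD's backing device, and the fio run is destructive on a raw device:

    # Probe sustained random-write IOPS (hypothetical device path; do NOT run against a device in use):
    fio --name=iops-probe --filename=/dev/sdX --direct=1 --ioengine=libaio \
        --rw=randwrite --bs=4k --iodepth=16 --runtime=60 --time_based
    # Pin the measured capacity for this OSD (replace 315 with the fio result):
    ceph config set osd.3 osd_mclock_max_capacity_iops_hdd 315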
Skipping non agent specs 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617] boot 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: osdmap e135: 8 total, 8 up, 8 in 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: OSD bench result of 14919.372503 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:52.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:58:53.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:58:53.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T09:58:53.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:53.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617] boot 2026-03-10T09:58:53.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: osdmap e135: 8 total, 8 up, 8 in 2026-03-10T09:58:53.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:52 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:58:53.200 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: OSD bench result of 14919.372503 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T09:58:53.200 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:53.200 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
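All three mons record the same exchange here: the mgr dispatches "osd ok-to-stop" for osd.4, and the answer is "unsafe to stop osd(s) at this time (20 PGs are or would become offline)", so the upgrade keeps waiting instead of restarting the daemon. The same gate can be driven by hand, since ok-to-stop exits non-zero while stopping would take PGs offline. A polling sketch; the interval and timeout are arbitrary choices, not values cephadm itself uses:

    # Sketch: wait until the monitors report an OSD is safe to stop.
    import subprocess
    import time

    def wait_ok_to_stop(osd_id, timeout=600.0, interval=10.0):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = subprocess.run(["ceph", "osd", "ok-to-stop", osd_id],
                                    capture_output=True, text=True)
            if result.returncode == 0:
                return True  # no PGs would go offline
            time.sleep(interval)
        return False

    if wait_ok_to_stop("4"):
        print("osd.4 can be stopped without losing PG availability")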
Skipping non agent specs 2026-03-10T09:58:53.200 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: osd.3 [v2:192.168.123.100:6826/1426943617,v1:192.168.123.100:6827/1426943617] boot 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: osdmap e135: 8 total, 8 up, 8 in 2026-03-10T09:58:53.201 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:52 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T09:58:54.109 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: osdmap e136: 8 total, 8 up, 8 in 2026-03-10T09:58:54.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:53 vm03 ceph-mon[123760]: pgmap v441: 161 pgs: 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 31 active+undersized, 17 active+undersized+degraded, 110 active+clean; 457 KiB data, 199 MiB used, 160 GiB / 160 GiB avail; 181 KiB/s rd, 0 B/s wr, 302 op/s; 67/627 objects degraded (10.686%); 0 B/s, 0 objects/s recovering 2026-03-10T09:58:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: osdmap e136: 8 total, 8 up, 8 in 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[118593]: pgmap v441: 161 pgs: 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 31 active+undersized, 17 active+undersized+degraded, 110 active+clean; 457 KiB data, 199 MiB used, 160 GiB / 160 GiB avail; 181 KiB/s rd, 0 B/s wr, 302 op/s; 67/627 objects degraded (10.686%); 0 B/s, 0 objects/s recovering 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: osdmap e136: 8 total, 8 up, 8 in 2026-03-10T09:58:54.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:53 vm00 ceph-mon[121907]: pgmap v441: 161 pgs: 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 31 active+undersized, 17 active+undersized+degraded, 110 active+clean; 457 KiB data, 199 MiB used, 160 GiB / 160 GiB avail; 181 KiB/s rd, 0 B/s wr, 302 op/s; 67/627 objects degraded (10.686%); 0 B/s, 0 objects/s recovering 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 1s ago 12m - - 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 2m ago 12m - - 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (6m) 1s ago 10m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (6m) 2m ago 10m 47.7M - dad864ee21e9 011f2081bf92 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (4m) 1s ago 9m 53.0M - 3.5 e1d6a67b021e a19997a050c6 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (3m) 2m ago 11m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (5m) 1s ago 12m 573M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (3m) 1s ago 12m 74.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (3m) 2m ago 12m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (3m) 1s ago 12m 53.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (6m) 1s ago 10m 9651k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (6m) 2m ago 10m 9647k - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (2m) 1s ago 11m 74.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (95s) 1s ago 11m 47.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (50s) 1s ago 11m 45.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (7s) 1s ago 11m 69.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587 
2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (11m) 2m ago 11m 55.7M 4096M 17.2.0 e1d6a67b021e 76735d749d5c 2026-03-10T09:58:54.688 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (11m) 2m ago 11m 53.1M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (11m) 2m ago 11m 51.1M 4096M 17.2.0 e1d6a67b021e d485462ed497 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (10m) 2m ago 10m 52.8M 4096M 17.2.0 e1d6a67b021e 9271ca589720 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (4m) 2m ago 10m 44.0M - 2.51.0 1d3b7f56885b fbf1a95f0e55 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (10m) 1s ago 10m 95.1M - 17.2.0 e1d6a67b021e a1037186db7f 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (10m) 2m ago 10m 95.3M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (10m) 1s ago 10m 95.9M - 17.2.0 e1d6a67b021e 3ac258c6b805 2026-03-10T09:58:54.689 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (10m) 2m ago 10m 92.5M - 17.2.0 e1d6a67b021e 5cd6eb4d6619 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4, 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8, 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 9 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:58:54.967 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": 
"quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: "mgr" 2026-03-10T09:58:55.212 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:58:55.213 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "9/23 daemons upgraded", 2026-03-10T09:58:55.213 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T09:58:55.213 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T09:58:55.213 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:58:55.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:54 vm03 ceph-mon[123760]: from='client.34323 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:54 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 67/627 objects degraded (10.686%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:55.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:54 vm03 ceph-mon[123760]: from='client.54257 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:54 vm03 ceph-mon[123760]: from='client.54260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:54 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/565980748' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[118593]: from='client.34323 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 67/627 objects degraded (10.686%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[118593]: from='client.54257 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[118593]: from='client.54260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[118593]: from='client.? 
192.168.123.100:0/565980748' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[121907]: from='client.34323 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 67/627 objects degraded (10.686%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[121907]: from='client.54257 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[121907]: from='client.54260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:55.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:54 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/565980748' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Degraded data redundancy: 67/627 objects degraded (10.686%), 20 pgs degraded 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 67/627 objects degraded (10.686%), 20 pgs degraded 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.9 is active+undersized+degraded, acting [1,7] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.a is active+undersized+degraded, acting [1,7] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.13 is active+undersized+degraded, acting [0,4] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.19 is active+undersized+degraded, acting [6,0] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.1 is active+undersized+degraded, acting [0,4] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.2 is active+undersized+degraded, acting [5,6] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.5 is active+recovering+degraded, acting [5,3,2] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.7 is active+undersized+degraded, acting [7,0] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.8 is active+undersized+degraded, acting [1,7] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.b is active+undersized+degraded, acting [0,4] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.c is active+degraded, acting [5,3,6] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.12 is active+undersized+degraded, acting [0,7] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.15 is active+recovery_wait+degraded, acting [7,3,4] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.17 is active+undersized+degraded, acting [0,5] 2026-03-10T09:58:55.475 
INFO:teuthology.orchestra.run.vm00.stdout: pg 3.18 is active+undersized+degraded, acting [0,1] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.19 is active+undersized+degraded, acting [1,4] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.1e is active+undersized+degraded, acting [6,2] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.0 is active+undersized+degraded, acting [7,0] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.14 is active+undersized+degraded, acting [1,7] 2026-03-10T09:58:55.475 INFO:teuthology.orchestra.run.vm00.stdout: pg 6.c is active+undersized+degraded, acting [6,5] 2026-03-10T09:58:56.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:55 vm03 ceph-mon[123760]: from='client.44317 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:56.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:56 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3115989910' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:58:56.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:56 vm03 ceph-mon[123760]: pgmap v442: 161 pgs: 1 active+recovery_wait, 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 14 active+undersized, 7 active+undersized+degraded, 136 active+clean; 457 KiB data, 203 MiB used, 160 GiB / 160 GiB avail; 148 KiB/s rd, 0 B/s wr, 247 op/s; 33/627 objects degraded (5.263%); 0 B/s, 5 objects/s recovering 2026-03-10T09:58:56.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:56 vm00 ceph-mon[121907]: from='client.44317 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:56.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:56 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3115989910' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:58:56.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:56 vm00 ceph-mon[121907]: pgmap v442: 161 pgs: 1 active+recovery_wait, 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 14 active+undersized, 7 active+undersized+degraded, 136 active+clean; 457 KiB data, 203 MiB used, 160 GiB / 160 GiB avail; 148 KiB/s rd, 0 B/s wr, 247 op/s; 33/627 objects degraded (5.263%); 0 B/s, 5 objects/s recovering 2026-03-10T09:58:56.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:55 vm00 ceph-mon[118593]: from='client.44317 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:58:56.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:56 vm00 ceph-mon[118593]: from='client.? 
192.168.123.100:0/3115989910' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:58:56.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:56 vm00 ceph-mon[118593]: pgmap v442: 161 pgs: 1 active+recovery_wait, 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 14 active+undersized, 7 active+undersized+degraded, 136 active+clean; 457 KiB data, 203 MiB used, 160 GiB / 160 GiB avail; 148 KiB/s rd, 0 B/s wr, 247 op/s; 33/627 objects degraded (5.263%); 0 B/s, 5 objects/s recovering 2026-03-10T09:58:57.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:56.955Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:57.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:58:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:58:56.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:58:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:57 vm03 ceph-mon[123760]: pgmap v443: 161 pgs: 3 active+recovery_wait, 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 155 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 160 KiB/s rd, 0 B/s wr, 268 op/s; 9/627 objects degraded (1.435%); 0 B/s, 6 objects/s recovering 2026-03-10T09:58:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:57 vm00 ceph-mon[118593]: pgmap v443: 161 pgs: 3 active+recovery_wait, 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 155 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 160 KiB/s rd, 0 B/s wr, 268 op/s; 9/627 objects degraded (1.435%); 0 B/s, 6 objects/s recovering 2026-03-10T09:58:58.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:57 vm00 ceph-mon[121907]: pgmap v443: 161 pgs: 3 active+recovery_wait, 1 active+degraded, 1 active+recovering+degraded, 1 active+recovery_wait+degraded, 155 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 160 KiB/s rd, 0 B/s wr, 268 op/s; 9/627 objects degraded (1.435%); 0 B/s, 6 objects/s recovering 2026-03-10T09:59:00.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:59 vm03 ceph-mon[123760]: pgmap v444: 161 pgs: 3 active+recovery_wait, 158 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 0 B/s wr, 338 op/s; 0 B/s, 5 objects/s recovering 2026-03-10T09:59:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:58:59 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 9/627 objects degraded (1.435%), 3 pgs degraded) 2026-03-10T09:59:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:59 vm00 ceph-mon[118593]: pgmap v444: 161 pgs: 3 active+recovery_wait, 158 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 0 B/s wr, 338 op/s; 0 B/s, 5 objects/s recovering 
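The two JSON dumps earlier in this stretch are the natural checkpoints for following the staggered upgrade: ceph versions shows the cluster split across 17.2.0 (quincy) and 19.2.3 (squid), while ceph orch upgrade status reports "9/23 daemons upgraded" with mon and mgr already complete. A watcher can poll the latter until in_progress goes false; a minimal sketch, relying only on the fields visible in the output above:

    # Sketch: report staggered-upgrade progress from `ceph orch upgrade status`,
    # whose output is the JSON document shown above.
    import json
    import subprocess

    out = subprocess.run(["ceph", "orch", "upgrade", "status"],
                         capture_output=True, text=True, check=True).stdout
    status = json.loads(out)
    if status["in_progress"]:
        print(status.get("progress"), "-", status.get("message"))
        print("services complete:", ", ".join(status.get("services_complete", [])))
    else:
        print("no upgrade in progress")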
2026-03-10T09:59:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:58:59 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 9/627 objects degraded (1.435%), 3 pgs degraded) 2026-03-10T09:59:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:59 vm00 ceph-mon[121907]: pgmap v444: 161 pgs: 3 active+recovery_wait, 158 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 0 B/s wr, 338 op/s; 0 B/s, 5 objects/s recovering 2026-03-10T09:59:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:58:59 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 9/627 objects degraded (1.435%), 3 pgs degraded) 2026-03-10T09:59:01.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:59:00] "GET /metrics HTTP/1.1" 200 38218 "" "Prometheus/2.51.0" 2026-03-10T09:59:02.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:01 vm03 ceph-mon[123760]: pgmap v445: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 0 B/s wr, 280 op/s; 0 B/s, 4 objects/s recovering 2026-03-10T09:59:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:01 vm00 ceph-mon[121907]: pgmap v445: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 0 B/s wr, 280 op/s; 0 B/s, 4 objects/s recovering 2026-03-10T09:59:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:01 vm00 ceph-mon[118593]: pgmap v445: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 0 B/s wr, 280 op/s; 0 B/s, 4 objects/s recovering 2026-03-10T09:59:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:03.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:03 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:59:03.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:03.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:03 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:59:03.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:03.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:03 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:59:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:04 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:04.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:04 vm00 ceph-mon[118593]: pgmap v446: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 153 KiB/s rd, 0 B/s wr, 254 op/s; 0 B/s, 4 objects/s recovering 2026-03-10T09:59:04.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:04 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:04.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:04 vm00 ceph-mon[121907]: pgmap v446: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 153 KiB/s rd, 0 B/s wr, 254 op/s; 0 B/s, 4 objects/s recovering 2026-03-10T09:59:04.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:04.150Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:04.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:04 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:04.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:04 vm03 ceph-mon[123760]: pgmap v446: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 153 KiB/s rd, 0 B/s wr, 254 op/s; 0 B/s, 4 objects/s recovering 2026-03-10T09:59:06.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:05 vm03 ceph-mon[123760]: pgmap v447: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 0 B/s wr, 183 op/s; 0 B/s, 3 objects/s recovering 2026-03-10T09:59:06.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:05 vm00 ceph-mon[118593]: pgmap v447: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 0 B/s wr, 183 op/s; 0 B/s, 3 objects/s recovering 2026-03-10T09:59:06.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:05 vm00 ceph-mon[121907]: pgmap v447: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 0 B/s wr, 183 op/s; 0 B/s, 3 objects/s recovering 2026-03-10T09:59:07.262 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:07 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:59:07.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:07 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:59:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:06.955Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:06.956Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 
2026-03-10T09:59:07.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:07 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:59:08.048 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 systemd[1]: Stopping Ceph osd.4 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: Upgrade: osd.4 is safe to restart 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: Upgrade: Updating osd.4 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: Deploying daemon osd.4 on vm03 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: pgmap v448: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 92 KiB/s rd, 0 B/s wr, 152 op/s; 0 B/s, 1 objects/s recovering 2026-03-10T09:59:08.352 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:08 vm03 ceph-mon[123760]: osd.4 marked itself down and dead 2026-03-10T09:59:08.352 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[54312]: 2026-03-10T09:59:08.095+0000 7f71a4b1c700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T09:59:08.352 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[54312]: 2026-03-10T09:59:08.095+0000 7f71a4b1c700 -1 osd.4 136 *** Got signal Terminated *** 2026-03-10T09:59:08.352 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[54312]: 2026-03-10T09:59:08.095+0000 7f71a4b1c700 -1 osd.4 136 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T09:59:08.352 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147319]: 2026-03-10 09:59:08.348761804 +0000 UTC m=+0.266043081 container died 76735d749d5c08f111b24f1e3543e01866bc2661960d1552bf2caead71ffee3e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.tags=base centos centos-stream, release=754, name=centos-stream, 
distribution-scope=public, maintainer=Guillaume Abrioux , version=8, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, com.redhat.component=centos-stream-container, architecture=x86_64, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, vendor=Red Hat, Inc., io.buildah.version=1.19.8, GIT_CLEAN=True) 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: Upgrade: osd.4 is safe to restart 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: Upgrade: Updating osd.4 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: Deploying daemon osd.4 on vm03 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: pgmap v448: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 92 KiB/s rd, 0 B/s wr, 152 op/s; 0 B/s, 1 objects/s recovering 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[118593]: osd.4 marked itself down and dead 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: Upgrade: osd.4 is safe to restart 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: Upgrade: Updating osd.4 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: Deploying daemon osd.4 on vm03 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: pgmap v448: 161 pgs: 161 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 92 KiB/s rd, 0 B/s wr, 152 op/s; 0 B/s, 1 objects/s recovering 2026-03-10T09:59:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:08 vm00 ceph-mon[121907]: osd.4 marked itself down and dead 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147319]: 2026-03-10 09:59:08.377853352 +0000 UTC m=+0.295134629 container remove 76735d749d5c08f111b24f1e3543e01866bc2661960d1552bf2caead71ffee3e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, RELEASE=HEAD, ceph=True, io.buildah.version=1.19.8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, architecture=x86_64, release=754, GIT_CLEAN=True, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 bash[147319]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147388]: 2026-03-10 09:59:08.538630244 +0000 UTC m=+0.019071315 container create ee3bfc472334d153294d0790fb635cb075b280e6d402be497059626741e22b46 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3) 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147388]: 2026-03-10 09:59:08.577524038 +0000 UTC m=+0.057965109 container init ee3bfc472334d153294d0790fb635cb075b280e6d402be497059626741e22b46 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147388]: 2026-03-10 09:59:08.581878007 +0000 UTC m=+0.062319078 container start ee3bfc472334d153294d0790fb635cb075b280e6d402be497059626741e22b46 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , 
org.label-schema.build-date=20260223, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147388]: 2026-03-10 09:59:08.585134672 +0000 UTC m=+0.065575743 container attach ee3bfc472334d153294d0790fb635cb075b280e6d402be497059626741e22b46 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-10T09:59:08.630 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147388]: 2026-03-10 09:59:08.53134021 +0000 UTC m=+0.011781292 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:59:08.882 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147406]: 2026-03-10 09:59:08.741777226 +0000 UTC m=+0.012548867 container died ee3bfc472334d153294d0790fb635cb075b280e6d402be497059626741e22b46 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS) 2026-03-10T09:59:08.882 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 podman[147406]: 2026-03-10 09:59:08.759102764 +0000 UTC m=+0.029874394 container remove ee3bfc472334d153294d0790fb635cb075b280e6d402be497059626741e22b46 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, 
org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3) 2026-03-10T09:59:08.882 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.4.service: Deactivated successfully. 2026-03-10T09:59:08.882 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 systemd[1]: Stopped Ceph osd.4 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T09:59:08.882 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.4.service: Consumed 45.961s CPU time. 2026-03-10T09:59:09.144 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:08 vm03 systemd[1]: Starting Ceph osd.4 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T09:59:09.144 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 podman[147492]: 2026-03-10 09:59:09.052171404 +0000 UTC m=+0.017575034 container create 0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS) 2026-03-10T09:59:09.144 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 podman[147492]: 2026-03-10 09:59:09.090304425 +0000 UTC m=+0.055708055 container init 0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T09:59:09.144 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 podman[147492]: 2026-03-10 09:59:09.093637132 +0000 UTC m=+0.059040762 container start 0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:59:09.144 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 podman[147492]: 2026-03-10 09:59:09.094565921 +0000 UTC m=+0.059969540 container attach 0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-10T09:59:09.144 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 podman[147492]: 2026-03-10 09:59:09.045426109 +0000 UTC m=+0.010829749 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:59:09.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:09 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:59:09.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:09 vm03 ceph-mon[123760]: osdmap e137: 8 total, 7 up, 8 in 2026-03-10T09:59:09.548 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.548 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.548 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.548 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:09 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:59:09.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:09 vm00 ceph-mon[118593]: osdmap e137: 8 total, 7 up, 8 in 2026-03-10T09:59:09.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:09 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T09:59:09.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:09 vm00 ceph-mon[121907]: osdmap e137: 8 total, 7 up, 8 in 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: 
/usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T09:59:09.959 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d80ccbf4-c988-43a9-a7cb-b02af5b27d11/osd-block-fa9a846e-614f-4e56-875b-56d674cbe31c --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-10T09:59:09.960 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d80ccbf4-c988-43a9-a7cb-b02af5b27d11/osd-block-fa9a846e-614f-4e56-875b-56d674cbe31c --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-10T09:59:09.960 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/ln -snf /dev/ceph-d80ccbf4-c988-43a9-a7cb-b02af5b27d11/osd-block-fa9a846e-614f-4e56-875b-56d674cbe31c /var/lib/ceph/osd/ceph-4/block 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/ln -snf /dev/ceph-d80ccbf4-c988-43a9-a7cb-b02af5b27d11/osd-block-fa9a846e-614f-4e56-875b-56d674cbe31c /var/lib/ceph/osd/ceph-4/block 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T09:59:10.299 
INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate[147502]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 bash[147492]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 conmon[147502]: conmon 0a45a23fe33fee708cee : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471.scope/container/memory.events 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:09 vm03 podman[147492]: 2026-03-10 09:59:09.991629696 +0000 UTC m=+0.957033326 container died 0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 podman[147492]: 2026-03-10 09:59:10.012356067 +0000 UTC m=+0.977759697 container remove 0a45a23fe33fee708cee8339d189acf8c5242505e6cd442e3968852fda72c471 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-activate, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 podman[147737]: 2026-03-10 09:59:10.114855547 +0000 UTC m=+0.019764523 container create 0e3f0f82bc78bcccc69a9e29e08a090ac315ca5fb375025d1a44ec1514ddb6be (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 podman[147737]: 2026-03-10 09:59:10.155937146 +0000 UTC m=+0.060846122 container init 0e3f0f82bc78bcccc69a9e29e08a090ac315ca5fb375025d1a44ec1514ddb6be (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 podman[147737]: 2026-03-10 09:59:10.159437136 +0000 UTC m=+0.064346112 container start 0e3f0f82bc78bcccc69a9e29e08a090ac315ca5fb375025d1a44ec1514ddb6be (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, OSD_FLAVOR=default) 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 bash[147737]: 0e3f0f82bc78bcccc69a9e29e08a090ac315ca5fb375025d1a44ec1514ddb6be 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 podman[147737]: 2026-03-10 09:59:10.107626918 +0000 UTC m=+0.012535894 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T09:59:10.299 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 systemd[1]: Started Ceph osd.4 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
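
The journal records above trace the redeploy cycle cephadm uses to move osd.4 onto the new image: the old container is stopped (container died/remove for ee3bfc47...), a short-lived osd-4-deactivate and then an osd-4-activate helper container each run once, and the long-running osd-4 container (0e3f0f82...) is finally started under its systemd unit. A minimal sketch of reconstructing that lifecycle from the podman events in a log like this one (Python; the log path and daemon substring are illustrative assumptions):

import re
import sys

# Podman event records as they appear in the journal excerpt above, e.g.
#   2026-03-10 09:59:08.538630244 +0000 UTC m=+0.019071315 container create <64-hex id> (image=..., name=..., ...)
EVENT = re.compile(
    r'(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+) \+0000 UTC m=\+\S+'
    r' container (?P<event>create|init|start|attach|died|remove)'
    r' (?P<cid>[0-9a-f]{64}).*?, name=(?P<name>[^,)]+)'
)

def timeline(path, daemon='-osd-4'):
    """Yield (timestamp, event, short container id, name) for one daemon's containers."""
    with open(path) as fh:
        for line in fh:
            # finditer copes with several records flattened onto one line
            for m in EVENT.finditer(line):
                if daemon in m.group('name'):
                    yield m.group('ts'), m.group('event'), m.group('cid')[:12], m.group('name')

if __name__ == '__main__':
    for ts, event, cid, name in timeline(sys.argv[1]):
        print(f'{ts}  {event:>6}  {cid}  {name}')

Run against this excerpt it would print the deactivate container's create/init/start/attach/died/remove pass followed by the activate pass and the final start of ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4.
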
2026-03-10T09:59:10.580 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:10 vm03 ceph-mon[123760]: osdmap e138: 8 total, 7 up, 8 in 2026-03-10T09:59:10.580 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:10 vm03 ceph-mon[123760]: pgmap v451: 161 pgs: 32 peering, 14 stale+active+clean, 115 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T09:59:10.580 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:10.580 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:10 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:10.580 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T09:59:10.492+0000 7fdb10998740 -1 Falling back to public interface 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[118593]: osdmap e138: 8 total, 7 up, 8 in 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[118593]: pgmap v451: 161 pgs: 32 peering, 14 stale+active+clean, 115 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[121907]: osdmap e138: 8 total, 7 up, 8 in 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[121907]: pgmap v451: 161 pgs: 32 peering, 14 stale+active+clean, 115 active+clean; 457 KiB data, 221 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:10.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:10 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:59:10] "GET /metrics HTTP/1.1" 200 38217 "" "Prometheus/2.51.0" 2026-03-10T09:59:11.407 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:11 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 5 pgs inactive, 8 pgs peering (PG_AVAILABILITY) 2026-03-10T09:59:11.797 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T09:59:11.416+0000 7fdb10998740 -1 osd.4 0 read_superblock omap replica is missing. 
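
Each mon also logs the osdmap transitions that frame the restart: e137/e138 show the cluster at 7 up while osd.4 is down, and e140 (below) returns it to 8 up. A quick way to follow those transitions across a long run is to extract the epoch summaries; a sketch, assuming the excerpt is saved to a file named teuthology.log:

import re

# "osdmap eNNN: T total, U up, I in" summaries logged by the mons, as in
# the excerpt above. The input filename is illustrative.
OSDMAP = re.compile(r'osdmap e(\d+): (\d+) total, (\d+) up, (\d+) in')

def epoch_transitions(path):
    seen = set()
    with open(path) as fh:
        for line in fh:
            for epoch, total, up, inn in OSDMAP.findall(line):
                if epoch not in seen:  # the same epoch is echoed by all three mons
                    seen.add(epoch)
                    yield int(epoch), int(total), int(up), int(inn)

for epoch, total, up, inn in epoch_transitions('teuthology.log'):
    print(f'e{epoch}: {up}/{total} up, {inn} in')
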
2026-03-10T09:59:11.797 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:11 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T09:59:11.442+0000 7fdb10998740 -1 osd.4 136 log_to_monitors true 2026-03-10T09:59:11.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:11 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 5 pgs inactive, 8 pgs peering (PG_AVAILABILITY) 2026-03-10T09:59:11.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:11 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 5 pgs inactive, 8 pgs peering (PG_AVAILABILITY) 2026-03-10T09:59:12.470 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: from='osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T09:59:12.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T09:59:12.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: pgmap v452: 161 pgs: 14 active+undersized, 32 peering, 3 stale+active+clean, 11 active+undersized+degraded, 101 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:59:12.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.471 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: from='osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: pgmap v452: 161 pgs: 14 active+undersized, 32 peering, 3 stale+active+clean, 11 active+undersized+degraded, 101 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' 
entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: from='osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: pgmap v452: 161 pgs: 14 active+undersized, 32 peering, 3 stale+active+clean, 11 active+undersized+degraded, 101 active+clean; 457 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 38/627 objects degraded (6.061%) 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:12.797 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 09:59:12 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T09:59:12.469+0000 7fdb08743640 -1 osd.4 136 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: osdmap e139: 8 total, 7 up, 8 in 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 38/627 objects degraded (6.061%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 
ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: osdmap e139: 8 total, 7 up, 8 in 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 38/627 objects degraded (6.061%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:13.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: 
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.511 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:13 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:59:13.221+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: osdmap e139: 8 total, 7 up, 8 in 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 38/627 objects degraded (6.061%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:59:13.798 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:13.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:13.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:14.399 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:14.149Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:14.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: pgmap v454: 161 pgs: 22 active+undersized, 32 peering, 14 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: pgmap v455: 161 pgs: 22 active+undersized, 32 peering, 14 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651] boot 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: osdmap e140: 8 total, 8 up, 8 in 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:59:14.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: pgmap v454: 161 pgs: 22 active+undersized, 32 peering, 14 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: pgmap v455: 161 pgs: 22 active+undersized, 32 peering, 14 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651] boot 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: osdmap e140: 8 total, 8 up, 8 in 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: pgmap v454: 161 pgs: 22 active+undersized, 32 peering, 14 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: pgmap v455: 161 pgs: 22 active+undersized, 32 peering, 14 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: osd.4 [v2:192.168.123.103:6800/2375929651,v1:192.168.123.103:6801/2375929651] boot 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: osdmap e140: 8 total, 8 up, 8 in 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T09:59:14.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:15 vm03 ceph-mon[123760]: osdmap e141: 8 total, 8 up, 8 in 2026-03-10T09:59:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:15 vm00 ceph-mon[118593]: osdmap e141: 8 total, 8 up, 8 in 2026-03-10T09:59:15.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:15 vm00 ceph-mon[121907]: osdmap e141: 8 total, 8 up, 8 in 2026-03-10T09:59:16.462 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:16 vm03 ceph-mon[123760]: pgmap v458: 161 pgs: 41 active+undersized, 27 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 108/627 objects degraded (17.225%) 2026-03-10T09:59:16.462 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:16 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 5 pgs inactive, 8 pgs peering) 2026-03-10T09:59:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:16 vm00 ceph-mon[118593]: pgmap v458: 161 pgs: 41 active+undersized, 27 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 108/627 objects degraded (17.225%) 2026-03-10T09:59:16.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:16 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 5 pgs inactive, 8 pgs peering) 2026-03-10T09:59:16.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:16 vm00 ceph-mon[121907]: pgmap v458: 161 pgs: 41 active+undersized, 27 active+undersized+degraded, 93 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 108/627 objects degraded (17.225%) 2026-03-10T09:59:16.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:16 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 5 pgs inactive, 8 pgs peering) 2026-03-10T09:59:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:16.956Z 
caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:16.957Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:59:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:18 vm00 ceph-mon[118593]: pgmap v459: 161 pgs: 26 active+undersized, 20 active+undersized+degraded, 115 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 79/627 objects degraded (12.600%) 2026-03-10T09:59:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:59:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:18 vm00 ceph-mon[121907]: pgmap v459: 161 pgs: 26 active+undersized, 20 active+undersized+degraded, 115 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 79/627 objects degraded (12.600%) 2026-03-10T09:59:18.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:18.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T09:59:18.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:18 vm03 ceph-mon[123760]: pgmap v459: 161 pgs: 26 active+undersized, 20 active+undersized+degraded, 115 active+clean; 457 KiB data, 242 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 79/627 objects degraded (12.600%) 2026-03-10T09:59:19.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:19 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 79/627 objects degraded (12.600%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:19.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:19 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 79/627 objects degraded (12.600%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:19.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:19 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 79/627 objects degraded (12.600%), 20 pgs degraded (PG_DEGRADED) 
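
The PG_AVAILABILITY and PG_DEGRADED warnings in this stretch are transient: peering finishes and both checks clear within seconds (pgmap v460 below is back to 161 active+clean). The upgrade engine gates on the same condition, as the earlier "unsafe to stop osd(s) at this time (13 PGs are or would become offline)" reply to "osd ok-to-stop" shows. A sketch of that wait, assuming a reachable ceph CLI and the pgs_by_state layout that ceph -s -f json returns:

import json
import subprocess
import time

# Wait until every PG is active+clean, the condition the 'osd ok-to-stop'
# gate is effectively waiting on before the next OSD restart. Field names
# follow the pgmap section of ceph -s -f json.
def all_pgs_active_clean():
    status = json.loads(subprocess.check_output(['ceph', '-s', '-f', 'json']))
    pgmap = status['pgmap']
    clean = sum(state['count'] for state in pgmap.get('pgs_by_state', [])
                if state['state_name'] == 'active+clean')
    return clean == pgmap.get('num_pgs', -1)

while not all_pgs_active_clean():
    time.sleep(5)
print('all PGs active+clean; safe to stop the next OSD')
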
2026-03-10T09:59:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:20 vm00 ceph-mon[118593]: pgmap v460: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:59:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:20 vm00 ceph-mon[121907]: pgmap v460: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:59:20.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:20 vm03 ceph-mon[123760]: pgmap v460: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T09:59:21.082 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:59:20] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-10T09:59:21.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:21 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 79/627 objects degraded (12.600%), 20 pgs degraded) 2026-03-10T09:59:21.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:21 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 79/627 objects degraded (12.600%), 20 pgs degraded) 2026-03-10T09:59:21.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:21 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 79/627 objects degraded (12.600%), 20 pgs degraded) 2026-03-10T09:59:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:22 vm00 ceph-mon[118593]: pgmap v461: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:59:22.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:22 vm00 ceph-mon[121907]: pgmap v461: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:59:22.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:22 vm03 ceph-mon[123760]: pgmap v461: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T09:59:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:24 vm03 ceph-mon[123760]: pgmap v462: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:59:24.495 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:24 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:24.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:24 vm00 ceph-mon[118593]: pgmap v462: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:59:24.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:24 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:24.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:24 vm00 ceph-mon[121907]: pgmap v462: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T09:59:24.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:24 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:25.703 
INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 12s ago 12m - - 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 14s ago 12m - - 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (7m) 12s ago 11m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (6m) 14s ago 10m 49.3M - dad864ee21e9 011f2081bf92 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (5m) 12s ago 10m 53.0M - 3.5 e1d6a67b021e a19997a050c6 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (4m) 14s ago 12m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (6m) 12s ago 13m 574M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (4m) 12s ago 13m 74.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (3m) 14s ago 12m 48.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (3m) 12s ago 12m 52.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (7m) 12s ago 11m 9655k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (6m) 14s ago 10m 9755k - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (2m) 12s ago 12m 76.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (2m) 12s ago 12m 49.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (81s) 12s ago 12m 46.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (38s) 12s ago 11m 72.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (15s) 14s ago 11m 12.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 0e3f0f82bc78 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (11m) 14s ago 11m 61.2M 4096M 17.2.0 e1d6a67b021e 5b5ce45100ff 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (11m) 14s ago 11m 59.2M 4096M 17.2.0 e1d6a67b021e d485462ed497 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (11m) 14s ago 11m 60.2M 4096M 17.2.0 e1d6a67b021e 9271ca589720 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (5m) 14s ago 10m 48.5M - 2.51.0 1d3b7f56885b fbf1a95f0e55 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (10m) 12s ago 10m 
95.2M - 17.2.0 e1d6a67b021e a1037186db7f 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (10m) 14s ago 10m 96.0M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (10m) 12s ago 10m 96.2M - 17.2.0 e1d6a67b021e 3ac258c6b805 2026-03-10T09:59:26.105 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (10m) 14s ago 10m 93.2M - 17.2.0 e1d6a67b021e 5cd6eb4d6619 2026-03-10T09:59:26.356 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:26 vm00 ceph-mon[121907]: pgmap v463: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 957 B/s rd, 0 op/s 2026-03-10T09:59:26.356 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:26 vm00 ceph-mon[121907]: from='client.34356 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:26.356 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:26 vm00 ceph-mon[118593]: pgmap v463: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 957 B/s rd, 0 op/s 2026-03-10T09:59:26.356 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:26 vm00 ceph-mon[118593]: from='client.34356 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3, 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T09:59:26.356 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T09:59:26.357 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 7, 2026-03-10T09:59:26.357 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 10 2026-03-10T09:59:26.357 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T09:59:26.357 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": 
"quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "mgr" 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "10/23 daemons upgraded", 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T09:59:26.560 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T09:59:26.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:26 vm03 ceph-mon[123760]: pgmap v463: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 957 B/s rd, 0 op/s 2026-03-10T09:59:26.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:26 vm03 ceph-mon[123760]: from='client.34356 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:26.805 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:59:26.805 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:59:26.805 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T09:59:27.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:26.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:27.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:26.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T09:59:27.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.34362 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:27.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.34368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:27.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.? 
2026-03-10T09:59:26.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:26 vm03 ceph-mon[123760]: pgmap v463: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 957 B/s rd, 0 op/s
2026-03-10T09:59:26.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:26 vm03 ceph-mon[123760]: from='client.34356 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:26.805 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:59:26.805 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T09:59:26.805 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
2026-03-10T09:59:27.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:26.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:27.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:26.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:27.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.34362 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.34368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2225338653' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:27.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.44353 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3750783947' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:59:27.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[118593]: from='client.34362 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[118593]: from='client.34368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/2225338653' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[118593]: from='client.44353 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3750783947' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[121907]: from='client.34362 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[121907]: from='client.34368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2225338653' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[121907]: from='client.44353 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3750783947' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
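The HEALTH_WARN above (CEPHADM_AGENT_DOWN for vm03, whose agent the `orch ps` listing already showed in error) is the kind of condition a harness can read structurally rather than by scraping text. A sketch, assuming the `checks.<NAME>.summary.message` layout that `ceph health detail --format json` emits on current releases:

import json
import subprocess

out = subprocess.run(["ceph", "health", "detail", "--format", "json"],
                     capture_output=True, text=True, check=True).stdout
checks = json.loads(out).get("checks", {})
agent = checks.get("CEPHADM_AGENT_DOWN")
if agent:
    print("agent down:", agent["summary"]["message"])
else:
    print("all cephadm agents reporting")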
2026-03-10T09:59:27.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:28.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:28 vm00 ceph-mon[118593]: pgmap v464: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:59:28.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:28 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-10T09:59:28.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:28 vm00 ceph-mon[121907]: pgmap v464: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:59:28.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:28 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-10T09:59:28.774 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:28 vm03 ceph-mon[123760]: pgmap v464: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:59:28.774 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:28 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-10T09:59:29.321 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 systemd[1]: Stopping Ceph osd.5 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:59:29.588 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[56998]: 2026-03-10T09:59:29.400+0000 7fea9b461700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:59:29.588 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[56998]: 2026-03-10T09:59:29.400+0000 7fea9b461700 -1 osd.5 141 *** Got signal Terminated ***
2026-03-10T09:59:29.588 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[56998]: 2026-03-10T09:59:29.400+0000 7fea9b461700 -1 osd.5 141 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-10T09:59:29.840 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149842]: 2026-03-10 09:59:29.62955693 +0000 UTC m=+0.243497515 container died 5b5ce45100ff2d1b62b0b56b109d741d3fcaae8d974f7e8ec34b08476ebabcf9 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.openshift.expose-services=, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, GIT_CLEAN=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, maintainer=Guillaume Abrioux , com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, version=8, GIT_BRANCH=HEAD, RELEASE=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, architecture=x86_64, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream)
2026-03-10T09:59:29.840 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149842]: 2026-03-10 09:59:29.662943847 +0000 UTC m=+0.276884443 container remove 5b5ce45100ff2d1b62b0b56b109d741d3fcaae8d974f7e8ec34b08476ebabcf9 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, vcs-type=git, ceph=True, io.k8s.display-name=CentOS Stream 8, version=8, name=centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, release=754, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754)
2026-03-10T09:59:29.840 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 bash[149842]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5
2026-03-10T09:59:29.840 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149908]: 2026-03-10 09:59:29.81862941 +0000 UTC m=+0.017693266 container create 306401d2cd1c9b1fc92ab3767eccc291e3108b8a84f2ab06f712d0ca87f6c485 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid)
2026-03-10T09:59:29.840 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
2026-03-10T09:59:29.840 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: Upgrade: osd.5 is safe to restart
2026-03-10T09:59:29.840 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: Upgrade: Updating osd.5
2026-03-10T09:59:29.840 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:29.840 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-10T09:59:29.840 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:29.841 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: Deploying daemon osd.5 on vm03
2026-03-10T09:59:29.841 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:29 vm03 ceph-mon[123760]: osd.5 marked itself down and dead
2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: Upgrade: osd.5 is safe to restart 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: Upgrade: Updating osd.5 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: Deploying daemon osd.5 on vm03 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[118593]: osd.5 marked itself down and dead 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: Upgrade: osd.5 is safe to restart 2026-03-10T09:59:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: Upgrade: Updating osd.5 2026-03-10T09:59:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T09:59:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: Deploying daemon osd.5 on vm03 2026-03-10T09:59:29.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:29 vm00 ceph-mon[121907]: osd.5 marked itself down and dead 2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149908]: 2026-03-10 09:59:29.858797019 +0000 UTC m=+0.057860884 container init 306401d2cd1c9b1fc92ab3767eccc291e3108b8a84f2ab06f712d0ca87f6c485 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149908]: 2026-03-10 09:59:29.862653476 +0000 UTC m=+0.061717332 container start 306401d2cd1c9b1fc92ab3767eccc291e3108b8a84f2ab06f712d0ca87f6c485 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223)
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149908]: 2026-03-10 09:59:29.863585149 +0000 UTC m=+0.062648996 container attach 306401d2cd1c9b1fc92ab3767eccc291e3108b8a84f2ab06f712d0ca87f6c485 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:29 vm03 podman[149908]: 2026-03-10 09:59:29.81185984 +0000 UTC m=+0.010923696 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[149927]: 2026-03-10 09:59:30.014356976 +0000 UTC m=+0.011365552 container died 306401d2cd1c9b1fc92ab3767eccc291e3108b8a84f2ab06f712d0ca87f6c485 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[149927]: 2026-03-10 09:59:30.033087603 +0000 UTC m=+0.030096189 container remove 306401d2cd1c9b1fc92ab3767eccc291e3108b8a84f2ab06f712d0ca87f6c485 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service: Deactivated successfully.
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 systemd[1]: Stopped Ceph osd.5 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:59:30.118 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service: Consumed 40.975s CPU time.
2026-03-10T09:59:30.422 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 systemd[1]: Starting Ceph osd.5 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:59:30.422 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[150014]: 2026-03-10 09:59:30.330029092 +0000 UTC m=+0.017954745 container create 900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T09:59:30.422 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[150014]: 2026-03-10 09:59:30.370946073 +0000 UTC m=+0.058871736 container init 900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
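As the systemd lines above show, each containerized daemon is wrapped in a ceph-<fsid>@<daemon>.service unit, which is what cephadm stops and starts around the redeploy. A sketch for probing that unit directly on the host (the unit name is taken from this run's fsid; substitute your own):

import subprocess

FSID = "e2d4b2ee-1c65-11f1-bae0-b525704df8fa"
unit = f"ceph-{FSID}@osd.5.service"

# `systemctl is-active` prints the state and exits non-zero unless active.
r = subprocess.run(["systemctl", "is-active", unit],
                   capture_output=True, text=True)
print(unit, "->", r.stdout.strip() or r.stderr.strip())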
2026-03-10T09:59:30.422 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[150014]: 2026-03-10 09:59:30.37492996 +0000 UTC m=+0.062855613 container start 900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T09:59:30.422 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[150014]: 2026-03-10 09:59:30.379220129 +0000 UTC m=+0.067145791 container attach 900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T09:59:30.798 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 podman[150014]: 2026-03-10 09:59:30.322758424 +0000 UTC m=+0.010684077 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:59:30.798 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:30.798 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:30.798 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:30.798 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:30 vm03 ceph-mon[123760]: pgmap v465: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:59:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:30 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:59:30.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:30 vm03 ceph-mon[123760]: osdmap e142: 8 total, 7 up, 8 in
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:30 vm00 ceph-mon[118593]: pgmap v465: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:30 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:30 vm00 ceph-mon[118593]: osdmap e142: 8 total, 7 up, 8 in
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:30 vm00 ceph-mon[121907]: pgmap v465: 161 pgs: 161 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:30 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:30 vm00 ceph-mon[121907]: osdmap e142: 8 total, 7 up, 8 in
2026-03-10T09:59:30.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:59:30] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0"
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-56fdc59b-0435-4567-91d3-75b51690129b/osd-block-f2832919-d86c-4c2f-b301-84a2776e8ec6 --path /var/lib/ceph/osd/ceph-5 --no-mon-config
2026-03-10T09:59:31.275 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:30 vm03 bash[150014]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-56fdc59b-0435-4567-91d3-75b51690129b/osd-block-f2832919-d86c-4c2f-b301-84a2776e8ec6 --path /var/lib/ceph/osd/ceph-5 --no-mon-config
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/ln -snf /dev/ceph-56fdc59b-0435-4567-91d3-75b51690129b/osd-block-f2832919-d86c-4c2f-b301-84a2776e8ec6 /var/lib/ceph/osd/ceph-5/block
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 bash[150014]: Running command: /usr/bin/ln -snf /dev/ceph-56fdc59b-0435-4567-91d3-75b51690129b/osd-block-f2832919-d86c-4c2f-b301-84a2776e8ec6 /var/lib/ceph/osd/ceph-5/block
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 bash[150014]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 bash[150014]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 bash[150014]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate[150025]: --> ceph-volume lvm activate successful for osd ID: 5
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 bash[150014]: --> ceph-volume lvm activate successful for osd ID: 5
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 conmon[150025]: conmon 900b69633e39a9d66373 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a.scope/container/memory.events
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 podman[150014]: 2026-03-10 09:59:31.306427111 +0000 UTC m=+0.994352764 container died 900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 podman[150014]: 2026-03-10 09:59:31.325799119 +0000 UTC m=+1.013724772 container remove 900b69633e39a9d6637370c617bc65bb5ee16d0808c0e7ebbcb5a038687bd64a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 podman[150263]: 2026-03-10 09:59:31.423627866 +0000 UTC m=+0.015777902 container create 3c6d3a12c7d36a8eaa0da0575d975b04b989eaae79133db2d7b7626fc54c2c68 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 podman[150263]: 2026-03-10 09:59:31.469659459 +0000 UTC m=+0.061809495 container init 3c6d3a12c7d36a8eaa0da0575d975b04b989eaae79133db2d7b7626fc54c2c68 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 podman[150263]: 2026-03-10 09:59:31.472365263 +0000 UTC m=+0.064515299 container start 3c6d3a12c7d36a8eaa0da0575d975b04b989eaae79133db2d7b7626fc54c2c68 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, OSD_FLAVOR=default)
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 bash[150263]: 3c6d3a12c7d36a8eaa0da0575d975b04b989eaae79133db2d7b7626fc54c2c68
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 podman[150263]: 2026-03-10 09:59:31.417053351 +0000 UTC m=+0.009203397 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:59:31.549 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:31 vm03 systemd[1]: Started Ceph osd.5 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:59:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:31 vm00 ceph-mon[118593]: osdmap e143: 8 total, 7 up, 8 in
2026-03-10T09:59:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:31 vm00 ceph-mon[121907]: osdmap e143: 8 total, 7 up, 8 in
2026-03-10T09:59:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:31.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:31.910 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:31 vm03 ceph-mon[123760]: osdmap e143: 8 total, 7 up, 8 in
2026-03-10T09:59:31.910 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:31.910 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:32.450 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T09:59:32.294+0000 7fe705829740 -1 Falling back to public interface
2026-03-10T09:59:32.701 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:32 vm03 ceph-mon[123760]: pgmap v468: 161 pgs: 10 peering, 16 stale+active+clean, 135 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:59:32.701 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:32 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)
2026-03-10T09:59:32.701 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:32 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:32.701 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:32 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:59:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[118593]: pgmap v468: 161 pgs: 10 peering, 16 stale+active+clean, 135 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:59:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)
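At this point the rebuilt osd.5 container is up but the OSD has not yet rejoined the cluster: the osdmap still says 7 of 8 up and PGs are peering. A sketch that waits for the daemon to report back up, using the `up` flag in `ceph osd dump --format json` (field layout is stable on recent releases, but verify locally):

import json
import subprocess
import time

def osd_is_up(osd_id: int) -> bool:
    out = subprocess.run(["ceph", "osd", "dump", "--format", "json"],
                         capture_output=True, text=True, check=True).stdout
    for o in json.loads(out)["osds"]:
        if o["osd"] == osd_id:
            return o["up"] == 1
    raise KeyError(f"osd.{osd_id} not in osdmap")

while not osd_is_up(5):
    time.sleep(5)
print("osd.5 booted")  # mirrors the `osd.5 ... boot` line further down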
2026-03-10T09:59:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:32.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:59:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[121907]: pgmap v468: 161 pgs: 10 peering, 16 stale+active+clean, 135 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:59:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)
2026-03-10T09:59:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:59:32.974 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T09:59:32.928+0000 7fe705829740 -1 osd.5 0 read_superblock omap replica is missing.
2026-03-10T09:59:32.974 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T09:59:32.972+0000 7fe705829740 -1 osd.5 141 log_to_monitors true
2026-03-10T09:59:33.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: pgmap v469: 161 pgs: 24 active+undersized, 10 peering, 5 stale+active+clean, 10 active+undersized+degraded, 112 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 37/627 objects degraded (5.901%)
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: pgmap v469: 161 pgs: 24 active+undersized, 10 peering, 5 stale+active+clean, 10 active+undersized+degraded, 112 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 37/627 objects degraded (5.901%)
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:33.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: pgmap v469: 161 pgs: 24 active+undersized, 10 peering, 5 stale+active+clean, 10 active+undersized+degraded, 112 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 37/627 objects degraded (5.901%)
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:34.298 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 09:59:33 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T09:59:33.858+0000 7fe6fd5d4640 -1 osd.5 141 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-10T09:59:34.619 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:34 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:59:34.563+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 37/627 objects degraded (5.901%), 10 pgs degraded (PG_DEGRADED)
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: osdmap e144: 8 total, 7 up, 8 in
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: pgmap v471: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: pgmap v472: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:59:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[118593]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 37/627 objects degraded (5.901%), 10 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: osdmap e144: 8 total, 7 up, 8 in 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: pgmap v471: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%) 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: pgmap v472: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%) 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:34 vm00 ceph-mon[121907]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 37/627 objects degraded (5.901%), 10 pgs degraded (PG_DEGRADED)
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: osdmap e144: 8 total, 7 up, 8 in
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: pgmap v471: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: pgmap v472: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 54/627 objects degraded (8.612%)
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:35.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:34 vm03 ceph-mon[123760]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline)
2026-03-10T09:59:36.108 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:35 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:59:36.108 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:35 vm03 ceph-mon[123760]: osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427] boot
2026-03-10T09:59:36.108 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:35 vm03 ceph-mon[123760]: osdmap e145: 8 total, 8 up, 8 in
2026-03-10T09:59:36.108 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[118593]: osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427] boot
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[118593]: osdmap e145: 8 total, 8 up, 8 in
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[121907]: osd.5 [v2:192.168.123.103:6808/2118745427,v1:192.168.123.103:6809/2118745427] boot
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[121907]: osdmap e145: 8 total, 8 up, 8 in
2026-03-10T09:59:36.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T09:59:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:36 vm00 ceph-mon[118593]: osdmap e146: 8 total, 8 up, 8 in
2026-03-10T09:59:37.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:36 vm00 ceph-mon[118593]: pgmap v475: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 611 B/s rd, 0 op/s; 54/627 objects degraded (8.612%)
2026-03-10T09:59:37.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:36.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:37.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:36.959Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:36 vm00 ceph-mon[121907]: osdmap e146: 8 total, 8 up, 8 in
2026-03-10T09:59:37.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:36 vm00 ceph-mon[121907]: pgmap v475: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 611 B/s rd, 0 op/s; 54/627 objects degraded (8.612%)
2026-03-10T09:59:37.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:36 vm03 ceph-mon[123760]: osdmap e146: 8 total, 8 up, 8 in
2026-03-10T09:59:37.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:36 vm03 ceph-mon[123760]: pgmap v475: 161 pgs: 33 active+undersized, 10 peering, 16 active+undersized+degraded, 102 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 611 B/s rd, 0 op/s; 54/627 objects degraded (8.612%)
2026-03-10T09:59:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:39 vm00 ceph-mon[118593]: pgmap v476: 161 pgs: 9 active+undersized, 10 peering, 7 active+undersized+degraded, 135 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 878 B/s rd, 0 op/s; 22/627 objects degraded (3.509%)
2026-03-10T09:59:39.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:39 vm00 ceph-mon[121907]: pgmap v476: 161 pgs: 9 active+undersized, 10 peering, 7 active+undersized+degraded, 135 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 878 B/s rd, 0 op/s; 22/627 objects degraded (3.509%)
2026-03-10T09:59:40.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:39 vm03 ceph-mon[123760]: pgmap v476: 161 pgs: 9 active+undersized, 10 peering, 7 active+undersized+degraded, 135 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 878 B/s rd, 0 op/s; 22/627 objects degraded (3.509%)
2026-03-10T09:59:41.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:40 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T09:59:41.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:40 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 22/627 objects degraded (3.509%), 7 pgs degraded)
2026-03-10T09:59:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:40 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T09:59:41.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:40 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 22/627 objects degraded (3.509%), 7 pgs degraded)
2026-03-10T09:59:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:40 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering)
2026-03-10T09:59:41.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:40 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 22/627 objects degraded (3.509%), 7 pgs degraded)
2026-03-10T09:59:41.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:59:40] "GET /metrics HTTP/1.1" 200 38229 "" "Prometheus/2.51.0"
2026-03-10T09:59:42.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:41 vm03 ceph-mon[123760]: pgmap v477: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:59:42.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:41 vm00 ceph-mon[118593]: pgmap v477: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:59:42.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:41 vm00 ceph-mon[121907]: pgmap v477: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T09:59:44.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:43 vm03 ceph-mon[123760]: pgmap v478: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:59:44.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:43 vm00 ceph-mon[118593]: pgmap v478: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:59:44.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:43 vm00 ceph-mon[121907]: pgmap v478: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s
2026-03-10T09:59:45.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:44 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:59:45.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:44 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:59:45.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:44 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T09:59:46.046 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:45 vm03 ceph-mon[123760]: pgmap v479: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 847 B/s rd, 0 op/s
2026-03-10T09:59:46.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:45 vm00 ceph-mon[118593]: pgmap v479: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 847 B/s rd, 0 op/s
2026-03-10T09:59:46.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:45 vm00 ceph-mon[121907]: pgmap v479: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 847 B/s rd, 0 op/s
2026-03-10T09:59:47.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:46.959Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:47.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:46.960Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:48.003 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:47 vm03 ceph-mon[123760]: pgmap v480: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 864 B/s rd, 0 op/s
2026-03-10T09:59:48.003 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:48.003 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:59:48.003 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[118593]: pgmap v480: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 864 B/s rd, 0 op/s
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[121907]: pgmap v480: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 864 B/s rd, 0 op/s
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T09:59:48.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:50.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:49 vm03 ceph-mon[123760]: pgmap v481: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s
2026-03-10T09:59:50.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:49 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:49 vm00 ceph-mon[118593]: pgmap v481: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s
2026-03-10T09:59:50.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:49 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:49 vm00 ceph-mon[121907]: pgmap v481: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s
2026-03-10T09:59:50.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:49 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:51.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:50 vm03 systemd[1]: Stopping Ceph osd.6 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:59:51.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[59401]: 2026-03-10T09:59:50.756+0000 7f5ae6a95700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-10T09:59:51.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[59401]: 2026-03-10T09:59:50.756+0000 7f5ae6a95700 -1 osd.6 146 *** Got signal Terminated ***
2026-03-10T09:59:51.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:50 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[59401]: 2026-03-10T09:59:50.756+0000 7f5ae6a95700 -1 osd.6 146 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-10T09:59:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:50 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:50 vm03 ceph-mon[123760]: Upgrade: osd.6 is safe to restart
2026-03-10T09:59:51.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:59:51.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:50 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[118593]: Upgrade: osd.6 is safe to restart
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[121907]: Upgrade: osd.6 is safe to restart
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:50 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:51.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:09:59:50] "GET /metrics HTTP/1.1" 200 38237 "" "Prometheus/2.51.0"
2026-03-10T09:59:51.937 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 podman[152362]: 2026-03-10 09:59:51.730347551 +0000 UTC m=+0.989240076 container died d485462ed4973fc9adb387211ee1303c6ff9b07bba3087bade949c96bc7feb87 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, vcs-type=git, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.expose-services=, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, ceph=True, name=centos-stream, release=754, io.buildah.version=1.19.8, maintainer=Guillaume Abrioux , architecture=x86_64, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True)
2026-03-10T09:59:51.937 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 podman[152362]: 2026-03-10 09:59:51.759180035 +0000 UTC m=+1.018072550 container remove d485462ed4973fc9adb387211ee1303c6ff9b07bba3087bade949c96bc7feb87 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, version=8, name=centos-stream, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, com.redhat.component=centos-stream-container, io.openshift.expose-services=, distribution-scope=public, maintainer=Guillaume Abrioux , GIT_CLEAN=True, architecture=x86_64, io.buildah.version=1.19.8, release=754, RELEASE=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, build-date=2022-05-03T08:36:31.336870, io.openshift.tags=base centos centos-stream, io.k8s.display-name=CentOS Stream 8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/)
2026-03-10T09:59:51.937 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 bash[152362]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6
2026-03-10T09:59:51.937 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:51 vm03 ceph-mon[123760]: Upgrade: Updating osd.6
2026-03-10T09:59:51.937 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:51 vm03 ceph-mon[123760]: Deploying daemon osd.6 on vm03
2026-03-10T09:59:51.937 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:51 vm03 ceph-mon[123760]: pgmap v482: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:59:51.937 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:51 vm03 ceph-mon[123760]: osd.6 marked itself down and dead
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[118593]: Upgrade: Updating osd.6
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[118593]: Deploying daemon osd.6 on vm03
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[118593]: pgmap v482: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[118593]: osd.6 marked itself down and dead
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[121907]: Upgrade: Updating osd.6
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[121907]: Deploying daemon osd.6 on vm03
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[121907]: pgmap v482: 161 pgs: 161 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T09:59:52.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:51 vm00 ceph-mon[121907]: osd.6 marked itself down and dead
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 podman[152430]: 2026-03-10 09:59:51.936537425 +0000 UTC m=+0.021038577 container create e89d47a593616b9fd89eba510d3e5a887c1016888a363ee23e386055b9859997 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 podman[152430]: 2026-03-10 09:59:51.981012336 +0000 UTC m=+0.065513488 container init e89d47a593616b9fd89eba510d3e5a887c1016888a363ee23e386055b9859997 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid)
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 podman[152430]: 2026-03-10 09:59:51.984084887 +0000 UTC m=+0.068586039 container start e89d47a593616b9fd89eba510d3e5a887c1016888a363ee23e386055b9859997 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, ceph=True)
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:51 vm03 podman[152430]: 2026-03-10 09:59:51.985056345 +0000 UTC m=+0.069557497 container attach e89d47a593616b9fd89eba510d3e5a887c1016888a363ee23e386055b9859997 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.license=GPLv2)
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152430]: 2026-03-10 09:59:51.928395827 +0000 UTC m=+0.012896989 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152430]: 2026-03-10 09:59:52.128947919 +0000 UTC m=+0.213449071 container died e89d47a593616b9fd89eba510d3e5a887c1016888a363ee23e386055b9859997 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152430]: 2026-03-10 09:59:52.148146251 +0000 UTC m=+0.232647403 container remove e89d47a593616b9fd89eba510d3e5a887c1016888a363ee23e386055b9859997 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.6.service: Deactivated successfully.
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 systemd[1]: Stopped Ceph osd.6 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:59:52.254 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.6.service: Consumed 5.001s CPU time.
2026-03-10T09:59:52.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 systemd[1]: Starting Ceph osd.6 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T09:59:52.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152532]: 2026-03-10 09:59:52.474988684 +0000 UTC m=+0.019242576 container create 0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223)
2026-03-10T09:59:52.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152532]: 2026-03-10 09:59:52.518679988 +0000 UTC m=+0.062933880 container init 0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T09:59:52.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152532]: 2026-03-10 09:59:52.522363713 +0000 UTC m=+0.066617605 container start 0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default)
2026-03-10T09:59:52.548 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152532]: 2026-03-10 09:59:52.523198706 +0000 UTC m=+0.067452599 container attach 0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid)
2026-03-10T09:59:53.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:52 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:59:53.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:52 vm03 ceph-mon[123760]: osdmap e147: 8 total, 7 up, 8 in
2026-03-10T09:59:53.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 podman[152532]: 2026-03-10 09:59:52.466547805 +0000 UTC m=+0.010801708 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:59:53.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 bash[152532]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:52 vm03 bash[152532]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:52 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:59:53.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:52 vm00 ceph-mon[118593]: osdmap e147: 8 total, 7 up, 8 in
2026-03-10T09:59:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:52 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN)
2026-03-10T09:59:53.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:52 vm00 ceph-mon[121907]: osdmap e147: 8 total, 7 up, 8 in
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: --> Failed to activate via raw: did not find any matching OSD to activate
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-10T09:59:53.465 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-10T09:59:53.466 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-10T09:59:53.466 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-1389d0a4-868a-4944-95e5-76d0fdbdc054/osd-block-310b60d4-374c-41a3-be5a-54d72a8a8262 --path /var/lib/ceph/osd/ceph-6 --no-mon-config
2026-03-10T09:59:53.466 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-1389d0a4-868a-4944-95e5-76d0fdbdc054/osd-block-310b60d4-374c-41a3-be5a-54d72a8a8262 --path /var/lib/ceph/osd/ceph-6 --no-mon-config
2026-03-10T09:59:53.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:53 vm03 ceph-mon[123760]: pgmap v484: 161 pgs: 20 peering, 6 stale+active+clean, 135 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail
2026-03-10T09:59:53.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:53 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 1 pg inactive, 2 pgs peering (PG_AVAILABILITY)
2026-03-10T09:59:53.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:53 vm03 ceph-mon[123760]: osdmap e148: 8 total, 7 up, 8 in
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/ln -snf /dev/ceph-1389d0a4-868a-4944-95e5-76d0fdbdc054/osd-block-310b60d4-374c-41a3-be5a-54d72a8a8262 /var/lib/ceph/osd/ceph-6/block
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/ln -snf /dev/ceph-1389d0a4-868a-4944-95e5-76d0fdbdc054/osd-block-310b60d4-374c-41a3-be5a-54d72a8a8262 /var/lib/ceph/osd/ceph-6/block
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate[152542]: --> ceph-volume lvm activate successful for osd ID: 6
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152532]: --> ceph-volume lvm activate successful for osd ID: 6
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 conmon[152542]: conmon 0d77945a686be577dcef : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca.scope/container/memory.events
2026-03-10T09:59:53.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 podman[152532]: 2026-03-10 09:59:53.501271219 +0000 UTC m=+1.045525111 container died 0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3)
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 podman[152532]: 2026-03-10 09:59:53.520183206 +0000 UTC m=+1.064437107 container remove 0d77945a686be577dcefe89ffa126ae5bf810bdea62d45a22f0a3498eb9a1dca (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-activate, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team )
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 podman[152790]: 2026-03-10 09:59:53.609585516 +0000 UTC m=+0.016083424 container create 09cb5d413a117bdd5f91db1cfe495f2d4378003cd81461f1d986873cd95ad68e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid)
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 podman[152790]: 2026-03-10 09:59:53.651673459 +0000 UTC m=+0.058171377 container init 09cb5d413a117bdd5f91db1cfe495f2d4378003cd81461f1d986873cd95ad68e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3)
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 podman[152790]: 2026-03-10 09:59:53.655170715 +0000 UTC m=+0.061668623 container start 09cb5d413a117bdd5f91db1cfe495f2d4378003cd81461f1d986873cd95ad68e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, OSD_FLAVOR=default)
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 bash[152790]: 09cb5d413a117bdd5f91db1cfe495f2d4378003cd81461f1d986873cd95ad68e
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 podman[152790]: 2026-03-10 09:59:53.603172883 +0000 UTC m=+0.009670791 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T09:59:53.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 systemd[1]: Started Ceph osd.6 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T09:59:53.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:53 vm00 ceph-mon[121907]: pgmap v484: 161 pgs: 20 peering, 6 stale+active+clean, 135 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:59:53.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:53 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 1 pg inactive, 2 pgs peering (PG_AVAILABILITY) 2026-03-10T09:59:53.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:53 vm00 ceph-mon[121907]: osdmap e148: 8 total, 7 up, 8 in 2026-03-10T09:59:53.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:53 vm00 ceph-mon[118593]: pgmap v484: 161 pgs: 20 peering, 6 stale+active+clean, 135 active+clean; 457 KiB data, 260 MiB used, 160 GiB / 160 GiB avail 2026-03-10T09:59:53.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:53 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 1 pg inactive, 2 pgs peering (PG_AVAILABILITY) 2026-03-10T09:59:53.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:53 vm00 ceph-mon[118593]: osdmap e148: 8 total, 7 up, 8 in 2026-03-10T09:59:54.297 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:53 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T09:59:53.970+0000 7f287c510740 -1 Falling back to public interface 2026-03-10T09:59:54.582 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T09:59:54.579+0000 7f287c510740 -1 osd.6 0 read_superblock omap replica is missing. 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:59:54.905 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:54 vm03 ceph-mon[123760]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:59:54.905 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T09:59:54.622+0000 7f287c510740 -1 osd.6 146 log_to_monitors true 2026-03-10T09:59:54.905 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 09:59:54 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T09:59:54.783+0000 7f28742bb640 -1 osd.6 146 set_numa_affinity 
unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[118593]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:59:55.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:59:55.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:54 vm00 ceph-mon[121907]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T09:59:56.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: pgmap v486: 161 pgs: 10 active+undersized, 20 peering, 3 stale+active+clean, 9 active+undersized+degraded, 119 active+clean; 457 KiB data, 261 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 
10 09:59:55 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 9 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: osdmap e149: 8 total, 7 up, 8 in 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: pgmap v486: 161 pgs: 10 active+undersized, 20 peering, 3 stale+active+clean, 9 active+undersized+degraded, 119 active+clean; 457 KiB data, 261 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 9 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: osdmap e149: 8 total, 7 up, 8 in 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.119 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: pgmap v486: 161 pgs: 10 active+undersized, 20 peering, 3 stale+active+clean, 9 active+undersized+degraded, 119 active+clean; 457 KiB data, 261 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 9 pgs degraded (PG_DEGRADED) 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: osdmap e149: 8 total, 7 up, 8 in 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:56.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T09:59:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:56.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:57.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:56 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T09:59:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:56 vm03 ceph-mon[123760]: osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127] boot 2026-03-10T09:59:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:56 vm03 ceph-mon[123760]: osdmap e150: 8 total, 8 up, 8 in 2026-03-10T09:59:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T09:59:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:56 vm03 ceph-mon[123760]: pgmap v489: 161 pgs: 2 unknown, 17 active+undersized, 20 peering, 13 active+undersized+degraded, 109 active+clean; 457 KiB data, 277 MiB used, 160 GiB / 160 GiB avail; 41/627 objects degraded (6.539%) 2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 
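Note: the set-device-class / create-or-move dispatches above are osd.6 re-registering itself in the CRUSH map as it boots into the new container, and the brief PG_DEGRADED check is the expected side effect of one OSD restarting at a time. The same requests can be issued by hand from a cephadm shell; a minimal sketch, assuming this run's topology (osd.6 on host vm03, ~20 GiB per OSD, hence CRUSH weight 0.0195):

    ceph osd crush set-device-class hdd 6                            # tag osd.6 as hdd (rm-device-class first if a class is already set)
    ceph osd crush create-or-move 6 0.0195 host=vm03 root=default    # re-anchor osd.6 under its host bucket
    ceph pg stat                                                     # watch the degraded PGs peer and recover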
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[118593]: osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127] boot
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[118593]: osdmap e150: 8 total, 8 up, 8 in
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[118593]: pgmap v489: 161 pgs: 2 unknown, 17 active+undersized, 20 peering, 13 active+undersized+degraded, 109 active+clean; 457 KiB data, 277 MiB used, 160 GiB / 160 GiB avail; 41/627 objects degraded (6.539%)
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:59:56.894+0000 7f8211476640 -1 mgr.server reply reply (11) Resource temporarily unavailable 2 pgs have unknown state; cannot draw any conclusions
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 09:59:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T09:59:56.894+0000 7f8211476640 -1 mgr.server reply reply (16) Device or resource busy 2 pgs have unknown state; cannot draw any conclusionsunsafe to stop osd(s) at this time (20 PGs are or would become offline)
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[121907]: osd.6 [v2:192.168.123.103:6816/3276374127,v1:192.168.123.103:6817/3276374127] boot
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[121907]: osdmap e150: 8 total, 8 up, 8 in
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T09:59:57.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:56 vm00 ceph-mon[121907]: pgmap v489: 161 pgs: 2 unknown, 17 active+undersized, 20 peering, 13 active+undersized+degraded, 109 active+clean; 457 KiB data, 277 MiB used, 160 GiB / 160 GiB avail; 41/627 objects degraded (6.539%)
2026-03-10T09:59:57.058 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:56.960Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:57.058 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 09:59:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T09:59:56.961Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T09:59:57.058 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 3s ago 13m - -
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 2s ago 13m - -
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (7m) 3s ago 11m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (7m) 2s ago 11m 49.1M - dad864ee21e9 011f2081bf92
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (5m) 3s ago 10m 53.1M - 3.5 e1d6a67b021e a19997a050c6
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (4m) 2s ago 13m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (6m) 3s ago 13m 576M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (4m) 3s ago 13m 76.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (4m) 2s ago 13m 51.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (4m) 3s ago 13m 55.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (7m) 3s ago 11m 9899k - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (7m) 2s ago 11m 9773k - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (3m) 3s ago 12m 78.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (2m) 3s ago 12m 52.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (113s) 3s ago 12m 48.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (69s) 3s ago 12m 75.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (47s) 2s ago 12m 48.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 0e3f0f82bc78
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (26s) 2s ago 12m 46.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c6d3a12c7d3
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (3s) 2s ago 12m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09cb5d413a11
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (11m) 2s ago 11m 61.6M 4096M 17.2.0 e1d6a67b021e 9271ca589720
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (5m) 2s ago 11m 50.6M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (11m) 3s ago 11m 95.4M - 17.2.0 e1d6a67b021e a1037186db7f
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (11m) 2s ago 11m 96.2M - 17.2.0 e1d6a67b021e 6cf3e03ed4bc
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (11m) 3s ago 11m 96.5M - 17.2.0 e1d6a67b021e 3ac258c6b805
2026-03-10T09:59:57.462 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (11m) 2s ago 11m 93.4M - 17.2.0 e1d6a67b021e 5cd6eb4d6619
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1,
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5,
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 12
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T09:59:57.700 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:59:57.766 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: osdmap e151: 8 total, 8 up, 8 in
2026-03-10T09:59:57.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:57.767 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true,
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "mon",
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "mgr"
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: ],
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "12/23 daemons upgraded",
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons",
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false
2026-03-10T09:59:57.916 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: osdmap e151: 8 total, 8 up, 8 in
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: pgmap v491: 161 pgs: 2 unknown, 17 active+undersized, 21 peering, 13 active+undersized+degraded, 108 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 41/627 objects degraded (6.539%)
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch
2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.'
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: Upgrade: 2 pgs have unknown state; cannot draw any conclusions 2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='client.44377 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='client.34395 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='client.44386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:57 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/1478001066' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: pgmap v491: 161 pgs: 2 unknown, 17 active+undersized, 21 peering, 13 active+undersized+degraded, 108 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 41/627 objects degraded (6.539%) 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: Upgrade: 2 pgs have unknown state; cannot draw any conclusions 2026-03-10T09:59:58.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='client.44377 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='client.34395 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='client.44386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/1478001066' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: osdmap e151: 8 total, 8 up, 8 in 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: pgmap v491: 161 pgs: 2 unknown, 17 active+undersized, 21 peering, 13 active+undersized+degraded, 108 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 41/627 objects degraded (6.539%) 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 
2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: Upgrade: 2 pgs have unknown state; cannot draw any conclusions 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='client.44377 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='client.34395 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='client.44386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:58.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:57 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/1478001066' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Reduced data availability: 1 pg inactive, 3 pgs peering; Degraded data redundancy: 41/627 objects degraded (6.539%), 13 pgs degraded 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 
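Note: both warnings in this health summary are expected mid-upgrade: the cephadm agent on vm03 goes quiet while that host's daemons are redeployed, and the peering/degraded PGs follow each OSD restart (the per-PG breakdown continues below). When watching an upgrade interactively, a known-transient code can be muted for a bounded time rather than ignored; a minimal sketch, assuming the codes seen here:

    ceph health detail                         # full per-PG breakdown, as dumped below
    ceph health mute CEPHADM_AGENT_DOWN 10m    # silence a transient code for 10 minutes
    ceph health unmute CEPHADM_AGENT_DOWN      # lift the mute once vm03 reports again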
2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive, 3 pgs peering 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.1a is stuck peering for 11m, current state peering, last acting [4,7] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.a is stuck peering for 60s, current state peering, last acting [4,1] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.a is stuck peering for 2m, current state peering, last acting [1,7] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 41/627 objects degraded (6.539%), 13 pgs degraded 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 1.0 is active+undersized+degraded, acting [7,0] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.2 is active+undersized+degraded, acting [5,1] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.16 is active+undersized+degraded, acting [5,2] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.19 is active+undersized+degraded, acting [3,0] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.1d is active+undersized+degraded, acting [7,0] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.2 is active+undersized+degraded, acting [3,5] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.c is active+undersized+degraded, acting [5,3] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.d is active+undersized+degraded, acting [7,5] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.11 is active+undersized+degraded, acting [7,4] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.1d is active+undersized+degraded, acting [5,4] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.1e is active+undersized+degraded, acting [3,2] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.1f is active+undersized+degraded, acting [5,1] 2026-03-10T09:59:58.159 INFO:teuthology.orchestra.run.vm00.stdout: pg 6.c is active+undersized+degraded, acting [3,5] 2026-03-10T09:59:59.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:58 vm00 ceph-mon[118593]: from='client.44395 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:59.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:58 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3872068653' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:59:59.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:58 vm00 ceph-mon[121907]: from='client.44395 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:59.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:58 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3872068653' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T09:59:59.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:58 vm03 ceph-mon[123760]: from='client.44395 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T09:59:59.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:58 vm03 ceph-mon[123760]: from='client.? 
192.168.123.100:0/3872068653' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:00:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 09:59:59 vm00 ceph-mon[118593]: pgmap v492: 161 pgs: 2 unknown, 12 active+undersized, 8 peering, 9 active+undersized+degraded, 130 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 25/627 objects degraded (3.987%) 2026-03-10T10:00:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 09:59:59 vm00 ceph-mon[121907]: pgmap v492: 161 pgs: 2 unknown, 12 active+undersized, 8 peering, 9 active+undersized+degraded, 130 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 25/627 objects degraded (3.987%) 2026-03-10T10:00:00.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 09:59:59 vm03 ceph-mon[123760]: pgmap v492: 161 pgs: 2 unknown, 12 active+undersized, 8 peering, 9 active+undersized+degraded, 130 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 25/627 objects degraded (3.987%) 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: Health detail: HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Reduced data availability: 1 pg inactive, 3 pgs peering; Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: [WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: [WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive, 3 pgs peering 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 2.1a is stuck peering for 11m, current state peering, last acting [4,7] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 3.a is stuck peering for 62s, current state peering, last acting [4,1] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 4.a is stuck peering for 2m, current state peering, last acting [1,7] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: [WRN] PG_DEGRADED: Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 2.2 is active+undersized+degraded, acting [5,1] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 2.16 is active+undersized+degraded, acting [5,2] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 2.19 is active+undersized+degraded, acting [3,0] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 3.2 is active+undersized+degraded, acting [3,5] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 3.c is active+undersized+degraded, acting [5,3] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 3.1d is active+undersized+degraded, acting 
[5,4] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 3.1e is active+undersized+degraded, acting [3,2] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 4.1f is active+undersized+degraded, acting [5,1] 2026-03-10T10:00:01.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[118593]: pg 6.c is active+undersized+degraded, acting [3,5] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: Health detail: HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Reduced data availability: 1 pg inactive, 3 pgs peering; Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: [WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: [WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive, 3 pgs peering 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 2.1a is stuck peering for 11m, current state peering, last acting [4,7] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 3.a is stuck peering for 62s, current state peering, last acting [4,1] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 4.a is stuck peering for 2m, current state peering, last acting [1,7] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: [WRN] PG_DEGRADED: Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 2.2 is active+undersized+degraded, acting [5,1] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 2.16 is active+undersized+degraded, acting [5,2] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 2.19 is active+undersized+degraded, acting [3,0] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 3.2 is active+undersized+degraded, acting [3,5] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 3.c is active+undersized+degraded, acting [5,3] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 3.1d is active+undersized+degraded, acting [5,4] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 3.1e is active+undersized+degraded, acting [3,2] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 4.1f is active+undersized+degraded, acting [5,1] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:00 vm00 ceph-mon[121907]: pg 6.c is active+undersized+degraded, acting [3,5] 2026-03-10T10:00:01.120 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:00:00 
vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:00:00] "GET /metrics HTTP/1.1" 200 38237 "" "Prometheus/2.51.0" 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: Health detail: HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Reduced data availability: 1 pg inactive, 3 pgs peering; Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: [WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: [WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive, 3 pgs peering 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 2.1a is stuck peering for 11m, current state peering, last acting [4,7] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 3.a is stuck peering for 62s, current state peering, last acting [4,1] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 4.a is stuck peering for 2m, current state peering, last acting [1,7] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: [WRN] PG_DEGRADED: Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 2.2 is active+undersized+degraded, acting [5,1] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 2.16 is active+undersized+degraded, acting [5,2] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 2.19 is active+undersized+degraded, acting [3,0] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 3.2 is active+undersized+degraded, acting [3,5] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 3.c is active+undersized+degraded, acting [5,3] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 3.1d is active+undersized+degraded, acting [5,4] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 3.1e is active+undersized+degraded, acting [3,2] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 4.1f is active+undersized+degraded, acting [5,1] 2026-03-10T10:00:01.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:00 vm03 ceph-mon[123760]: pg 6.c is active+undersized+degraded, acting [3,5] 2026-03-10T10:00:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:01 vm00 ceph-mon[118593]: pgmap v493: 161 pgs: 1 peering, 160 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail 2026-03-10T10:00:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:01 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg 
inactive, 3 pgs peering) 2026-03-10T10:00:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:01 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded) 2026-03-10T10:00:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:01 vm00 ceph-mon[121907]: pgmap v493: 161 pgs: 1 peering, 160 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail 2026-03-10T10:00:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:01 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 3 pgs peering) 2026-03-10T10:00:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:01 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded) 2026-03-10T10:00:02.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:01 vm03 ceph-mon[123760]: pgmap v493: 161 pgs: 1 peering, 160 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail 2026-03-10T10:00:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:01 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 3 pgs peering) 2026-03-10T10:00:02.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:01 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 25/627 objects degraded (3.987%), 9 pgs degraded) 2026-03-10T10:00:03.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:03.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:03.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:04.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:03 vm00 ceph-mon[118593]: pgmap v494: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 436 B/s rd, 0 op/s 2026-03-10T10:00:04.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:03 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:04.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:03 vm00 ceph-mon[121907]: pgmap v494: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 436 B/s rd, 0 op/s 2026-03-10T10:00:04.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:03 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:04.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:03 vm03 ceph-mon[123760]: pgmap v494: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 436 B/s rd, 0 op/s 2026-03-10T10:00:04.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:03 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:06.298 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:05 vm03 ceph-mon[123760]: pgmap v495: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 369 B/s rd, 0 op/s 2026-03-10T10:00:06.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:05 vm00 ceph-mon[118593]: pgmap v495: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 369 B/s rd, 0 op/s 2026-03-10T10:00:06.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:05 vm00 ceph-mon[121907]: pgmap v495: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 369 B/s rd, 0 op/s 2026-03-10T10:00:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:06.961Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:06.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:08 vm03 ceph-mon[123760]: pgmap v496: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 816 B/s rd, 0 op/s 2026-03-10T10:00:08.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:08 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:08 vm00 ceph-mon[118593]: pgmap v496: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 816 B/s rd, 0 op/s 2026-03-10T10:00:08.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:08 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:08 vm00 ceph-mon[121907]: pgmap v496: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 816 B/s rd, 0 op/s 2026-03-10T10:00:08.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:08 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:10.487 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:10 vm03 ceph-mon[123760]: pgmap v497: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-10T10:00:10.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:10 vm00 ceph-mon[118593]: pgmap v497: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-10T10:00:10.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:10 vm00 ceph-mon[121907]: pgmap v497: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-10T10:00:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:00:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - 
[10/Mar/2026:10:00:10] "GET /metrics HTTP/1.1" 200 38252 "" "Prometheus/2.51.0" 2026-03-10T10:00:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:12 vm00 ceph-mon[118593]: pgmap v498: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T10:00:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:12 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T10:00:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:12 vm00 ceph-mon[121907]: pgmap v498: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T10:00:12.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:12 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T10:00:12.448 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:12 vm03 ceph-mon[123760]: pgmap v498: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T10:00:12.448 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:12 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T10:00:13.002 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 systemd[1]: Stopping Ceph osd.7 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:00:13.266 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[62139]: 2026-03-10T10:00:13.072+0000 7f18f7ffb700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:00:13.266 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[62139]: 2026-03-10T10:00:13.072+0000 7f18f7ffb700 -1 osd.7 151 *** Got signal Terminated *** 2026-03-10T10:00:13.266 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[62139]: 2026-03-10T10:00:13.072+0000 7f18f7ffb700 -1 osd.7 151 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:00:13.528 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154896]: 2026-03-10 10:00:13.319861857 +0000 UTC m=+0.260498030 container died 9271ca58972038cd2baafcbd643a10434f7e38ea9a8224694ece23ab634dd924 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, release=754, maintainer=Guillaume Abrioux , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, GIT_BRANCH=HEAD, io.openshift.tags=base centos centos-stream, 
GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-type=git, distribution-scope=public, vendor=Red Hat, Inc., GIT_CLEAN=True, io.buildah.version=1.19.8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-10T10:00:13.529 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154896]: 2026-03-10 10:00:13.351760651 +0000 UTC m=+0.292396823 container remove 9271ca58972038cd2baafcbd643a10434f7e38ea9a8224694ece23ab634dd924 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-type=git, version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, RELEASE=HEAD, distribution-scope=public, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, ceph=True, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, release=754, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T10:00:13.529 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 bash[154896]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7 2026-03-10T10:00:13.529 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.488213752 +0000 UTC m=+0.019698119 container create 1a9c61f7c96edcfecff67532c36be9a4bed33f8f440b5a67cb0d51e7c7f0af4d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: Upgrade: osd.7 is safe to restart 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: Upgrade: Updating osd.7 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: Deploying daemon osd.7 on vm03 2026-03-10T10:00:13.529 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:13 vm03 ceph-mon[123760]: osd.7 marked itself down and dead 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: Upgrade: osd.7 is safe to restart 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: Upgrade: Updating osd.7 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: Deploying daemon osd.7 on vm03 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[118593]: osd.7 marked itself down and dead 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: Upgrade: osd.7 is safe to restart 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: Upgrade: Updating osd.7 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: Deploying daemon osd.7 on vm03 2026-03-10T10:00:13.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:13 vm00 ceph-mon[121907]: osd.7 marked itself down and dead 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.528742067 +0000 UTC m=+0.060226444 container init 1a9c61f7c96edcfecff67532c36be9a4bed33f8f440b5a67cb0d51e7c7f0af4d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS) 
2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.532954903 +0000 UTC m=+0.064439269 container start 1a9c61f7c96edcfecff67532c36be9a4bed33f8f440b5a67cb0d51e7c7f0af4d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.533974801 +0000 UTC m=+0.065459158 container attach 1a9c61f7c96edcfecff67532c36be9a4bed33f8f440b5a67cb0d51e7c7f0af4d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True) 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.481417884 +0000 UTC m=+0.012902250 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.658497494 +0000 UTC m=+0.189981860 container died 1a9c61f7c96edcfecff67532c36be9a4bed33f8f440b5a67cb0d51e7c7f0af4d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223) 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[154964]: 2026-03-10 10:00:13.674790798 +0000 UTC m=+0.206275165 container remove 
1a9c61f7c96edcfecff67532c36be9a4bed33f8f440b5a67cb0d51e7c7f0af4d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7.service: Deactivated successfully. 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 systemd[1]: Stopped Ceph osd.7 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T10:00:13.790 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7.service: Consumed 5.660s CPU time. 2026-03-10T10:00:14.048 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 systemd[1]: Starting Ceph osd.7 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:00:14.048 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:13 vm03 podman[155065]: 2026-03-10 10:00:13.98036231 +0000 UTC m=+0.016664001 container create 27f9520af50c162a520e45ae88c1ea9d6dcdcbf36fc4430dbe62ab9e971376f8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3) 2026-03-10T10:00:14.048 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 podman[155065]: 2026-03-10 10:00:14.023427823 +0000 UTC m=+0.059729525 container init 27f9520af50c162a520e45ae88c1ea9d6dcdcbf36fc4430dbe62ab9e971376f8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-10T10:00:14.048 
INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 podman[155065]: 2026-03-10 10:00:14.027177832 +0000 UTC m=+0.063479523 container start 27f9520af50c162a520e45ae88c1ea9d6dcdcbf36fc4430dbe62ab9e971376f8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T10:00:14.048 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 podman[155065]: 2026-03-10 10:00:14.029160272 +0000 UTC m=+0.065461963 container attach 27f9520af50c162a520e45ae88c1ea9d6dcdcbf36fc4430dbe62ab9e971376f8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-10T10:00:14.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 podman[155065]: 2026-03-10 10:00:13.973614109 +0000 UTC m=+0.009915810 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:00:14.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: pgmap v499: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: Health check failed: all OSDs are running squid or later but require_osd_release < 
squid (OSD_UPGRADE_FINISHED) 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: osdmap e152: 8 total, 7 up, 8 in 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:14 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: pgmap v499: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: osdmap e152: 8 total, 7 up, 8 in 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: pgmap v499: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: osdmap e152: 8 total, 7 up, 8 in 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' 
entity='mgr.y' 2026-03-10T10:00:14.557 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:14 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-32f79d5b-ab89-4ca9-8704-62ea073c5e6a/osd-block-4c157a84-f3cc-493a-ae93-8da2f0c9dd62 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-32f79d5b-ab89-4ca9-8704-62ea073c5e6a/osd-block-4c157a84-f3cc-493a-ae93-8da2f0c9dd62 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-10T10:00:14.935 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/ln -snf /dev/ceph-32f79d5b-ab89-4ca9-8704-62ea073c5e6a/osd-block-4c157a84-f3cc-493a-ae93-8da2f0c9dd62 /var/lib/ceph/osd/ceph-7/block 2026-03-10T10:00:15.298 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/ln -snf /dev/ceph-32f79d5b-ab89-4ca9-8704-62ea073c5e6a/osd-block-4c157a84-f3cc-493a-ae93-8da2f0c9dd62 /var/lib/ceph/osd/ceph-7/block 2026-03-10T10:00:15.298 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-10T10:00:15.298 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-10T10:00:15.298 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 
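The "Failed to activate via raw" message above is expected noise: inside the -activate container, ceph-volume first probes for raw-mode OSDs, finds none, and falls back to LVM activation; the prime-osd-dir, block-symlink, and chown commands above, and the "lvm activate successful" record just below, show that path completing. Outside cephadm, the equivalent step would look roughly like this (osd id and fsid taken from the LV name in the log):

    # re-create /var/lib/ceph/osd/ceph-7 from the bluestore LV without starting a systemd unit
    ceph-volume lvm activate 7 4c157a84-f3cc-493a-ae93-8da2f0c9dd62 --no-systemd
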
2026-03-10T10:00:15.298 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate[155076]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 bash[155065]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 podman[155065]: 2026-03-10 10:00:14.968290329 +0000 UTC m=+1.004592011 container died 27f9520af50c162a520e45ae88c1ea9d6dcdcbf36fc4430dbe62ab9e971376f8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:14 vm03 podman[155065]: 2026-03-10 10:00:14.988905223 +0000 UTC m=+1.025206914 container remove 27f9520af50c162a520e45ae88c1ea9d6dcdcbf36fc4430dbe62ab9e971376f8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-activate, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 podman[155323]: 2026-03-10 10:00:15.089378693 +0000 UTC m=+0.018645509 container create 6aa4b0e4646a80e7620bdf58b26743d1e6493e431a0899e7d9e2f9fe9fd6d817 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 podman[155323]: 2026-03-10 10:00:15.120997012 +0000 UTC m=+0.050263828 container init 6aa4b0e4646a80e7620bdf58b26743d1e6493e431a0899e7d9e2f9fe9fd6d817 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 podman[155323]: 2026-03-10 10:00:15.126188258 +0000 UTC m=+0.055455074 container start 6aa4b0e4646a80e7620bdf58b26743d1e6493e431a0899e7d9e2f9fe9fd6d817 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 bash[155323]: 6aa4b0e4646a80e7620bdf58b26743d1e6493e431a0899e7d9e2f9fe9fd6d817 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 podman[155323]: 2026-03-10 10:00:15.082668134 +0000 UTC m=+0.011934959 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:00:15.299 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 systemd[1]: Started Ceph osd.7 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
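With osd.7 back under systemd, the records that follow show the tail of the upgrade: the mgr drops the per-daemon container_image pins it set while stepping through the OSDs, then raises the cluster release gate, which clears the OSD_UPGRADE_FINISHED warning seen earlier. Done by hand, the finishing steps would be approximately:

    ceph config rm osd.7 container_image   # repeated for osd.0 through osd.7, as in the log
    ceph osd require-osd-release squid     # clears OSD_UPGRADE_FINISHED
    ceph versions                          # every daemon should now report the squid build
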
2026-03-10T10:00:15.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:15 vm03 ceph-mon[123760]: osdmap e153: 8 total, 7 up, 8 in 2026-03-10T10:00:15.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:15.586 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:15 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:15.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:15 vm00 ceph-mon[118593]: osdmap e153: 8 total, 7 up, 8 in 2026-03-10T10:00:15.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:15.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:15 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:15.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:15 vm00 ceph-mon[121907]: osdmap e153: 8 total, 7 up, 8 in 2026-03-10T10:00:15.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:15.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:15 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:16.172 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:00:15.945+0000 7f17d9c6c740 -1 Falling back to public interface 2026-03-10T10:00:16.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:16 vm03 ceph-mon[123760]: pgmap v502: 161 pgs: 9 active+undersized, 10 peering, 15 stale+active+clean, 2 active+undersized+degraded, 125 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 3/627 objects degraded (0.478%) 2026-03-10T10:00:16.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:16 vm03 ceph-mon[123760]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T10:00:16.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:16 vm03 ceph-mon[123760]: Health check failed: Degraded data redundancy: 3/627 objects degraded (0.478%), 2 pgs degraded (PG_DEGRADED) 2026-03-10T10:00:16.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:16 vm00 ceph-mon[118593]: pgmap v502: 161 pgs: 9 active+undersized, 10 peering, 15 stale+active+clean, 2 active+undersized+degraded, 125 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 3/627 objects degraded (0.478%) 2026-03-10T10:00:16.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:16 vm00 ceph-mon[118593]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T10:00:16.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:16 vm00 ceph-mon[118593]: Health check failed: Degraded data redundancy: 3/627 objects degraded (0.478%), 2 pgs degraded (PG_DEGRADED) 2026-03-10T10:00:16.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:16 vm00 ceph-mon[121907]: pgmap v502: 161 pgs: 9 active+undersized, 10 peering, 15 stale+active+clean, 2 active+undersized+degraded, 125 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 3/627 objects degraded (0.478%) 2026-03-10T10:00:16.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:16 vm00 ceph-mon[121907]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T10:00:16.619 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:16 vm00 ceph-mon[121907]: Health check failed: Degraded data redundancy: 3/627 objects degraded (0.478%), 2 pgs degraded (PG_DEGRADED) 2026-03-10T10:00:17.199 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:00:16.817+0000 7f17d9c6c740 -1 osd.7 0 read_superblock omap replica is missing. 2026-03-10T10:00:17.199 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:00:16.860+0000 7f17d9c6c740 -1 osd.7 151 log_to_monitors true 2026-03-10T10:00:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:16.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:17.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:16.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.869 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:17.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: pgmap v503: 161 pgs: 32 active+undersized, 10 peering, 8 stale+active+clean, 11 active+undersized+degraded, 100 active+clean; 457 KiB data, 296 MiB used, 160 GiB / 160 GiB avail; 30/627 objects degraded (4.785%) 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, 
"args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: osdmap e154: 8 total, 7 up, 8 in 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': 
finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T10:00:18.559 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: pgmap v503: 161 pgs: 32 active+undersized, 10 peering, 8 stale+active+clean, 11 active+undersized+degraded, 100 active+clean; 457 KiB data, 296 MiB used, 160 GiB / 160 GiB avail; 30/627 objects degraded (4.785%) 2026-03-10T10:00:18.564 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: osdmap e154: 8 total, 7 up, 8 in 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T10:00:18.564 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T10:00:18.564 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T10:00:18.565 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:18 vm00 ceph-mon[121907]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T10:00:18.797 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:00:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:00:18.526+0000 7f17d1216640 -1 osd.7 151 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: pgmap v503: 161 pgs: 32 active+undersized, 10 peering, 8 stale+active+clean, 11 active+undersized+degraded, 100 active+clean; 457 KiB data, 296 MiB used, 160 GiB / 160 GiB avail; 30/627 objects degraded (4.785%) 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: osdmap e154: 8 total, 7 up, 8 in 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 
192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T10:00:18.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 
ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T10:00:18.799 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T10:00:20.045 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: pgmap v505: 161 pgs: 39 active+undersized, 10 peering, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 65/627 objects degraded (10.367%) 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all osd 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490] boot 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: osdmap e155: 8 total, 8 up, 8 in 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: pgmap v505: 161 pgs: 39 active+undersized, 10 peering, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 65/627 objects degraded (10.367%) 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all osd 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: Upgrade: Setting 
require_osd_release to 19 squid 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490] boot 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: osdmap e155: 8 total, 8 up, 8 in 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:20.046 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:20.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: pgmap v505: 161 pgs: 39 active+undersized, 10 peering, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 65/627 objects degraded (10.367%) 2026-03-10T10:00:20.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all osd 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: osd.7 [v2:192.168.123.103:6824/250813490,v1:192.168.123.103:6825/250813490] boot 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: osdmap e155: 8 total, 8 up, 8 in 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:20.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 
10:00:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:20.969 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[118593]: OSD bench result of 27307.766116 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T10:00:20.969 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all mds 2026-03-10T10:00:20.969 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:20.969 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:20.969 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:20.969 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:00:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:00:20] "GET /metrics HTTP/1.1" 200 38327 "" "Prometheus/2.51.0" 2026-03-10T10:00:20.970 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[121907]: OSD bench result of 27307.766116 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T10:00:20.970 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all mds 2026-03-10T10:00:20.970 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:20.970 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:20.970 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:20 vm03 ceph-mon[123760]: OSD bench result of 27307.766116 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T10:00:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:20 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all mds 2026-03-10T10:00:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:21.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: Upgrade: Updating rgw.foo.vm00.gcwrcv (1/4) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: pgmap v507: 161 pgs: 18 peering, 31 active+undersized, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 65/627 objects degraded (10.367%) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: osdmap e156: 8 total, 8 up, 8 in 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[118593]: Health check update: Degraded data redundancy: 65/627 objects degraded (10.367%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: Upgrade: Updating rgw.foo.vm00.gcwrcv (1/4) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: pgmap v507: 161 pgs: 18 peering, 31 active+undersized, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 65/627 objects degraded (10.367%) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: osdmap e156: 8 total, 8 up, 8 in 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 
2026-03-10T10:00:21.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:21 vm00 ceph-mon[121907]: Health check update: Degraded data redundancy: 65/627 objects degraded (10.367%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: Upgrade: Updating rgw.foo.vm00.gcwrcv (1/4) 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: pgmap v507: 161 pgs: 18 peering, 31 active+undersized, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 65/627 objects degraded (10.367%) 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: osdmap e156: 8 total, 8 up, 8 in 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:22.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:21 vm03 ceph-mon[123760]: Health check update: Degraded data redundancy: 65/627 objects degraded (10.367%), 20 pgs degraded (PG_DEGRADED) 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[118593]: Upgrade: Updating rgw.smpl.vm00.tpyqjn (2/4) 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[118593]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[118593]: pgmap v509: 161 pgs: 18 peering, 19 active+undersized, 15 active+undersized+degraded, 109 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 185 B/s wr, 26 op/s; 56/630 objects degraded (8.889%) 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[121907]: Upgrade: Updating rgw.smpl.vm00.tpyqjn (2/4) 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", 
"caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[121907]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T10:00:23.102 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:23 vm00 ceph-mon[121907]: pgmap v509: 161 pgs: 18 peering, 19 active+undersized, 15 active+undersized+degraded, 109 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 185 B/s wr, 26 op/s; 56/630 objects degraded (8.889%) 2026-03-10T10:00:23.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:23 vm03 ceph-mon[123760]: Upgrade: Updating rgw.smpl.vm00.tpyqjn (2/4) 2026-03-10T10:00:23.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:23.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:23.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:23 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:23.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:23 vm03 ceph-mon[123760]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00 2026-03-10T10:00:23.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:23 vm03 ceph-mon[123760]: pgmap v509: 161 pgs: 18 peering, 19 active+undersized, 15 active+undersized+degraded, 109 active+clean; 457 KiB data, 312 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 185 B/s wr, 26 op/s; 56/630 objects degraded (8.889%) 2026-03-10T10:00:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:24 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:24 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:24.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:24 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:24.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:24 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:24.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:24 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:24.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:24 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:24.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:24 vm03 ceph-mon[123760]: 
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:24.450 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:24 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:25.335 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: pgmap v510: 161 pgs: 18 peering, 143 active+clean; 457 KiB data, 316 MiB used, 160 GiB / 160 GiB avail; 61 KiB/s rd, 151 B/s wr, 92 op/s 2026-03-10T10:00:25.335 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: Upgrade: Updating rgw.foo.vm03.smqfat (3/4) 2026-03-10T10:00:25.335 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:25.591 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:25.591 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:25.591 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: Deploying daemon rgw.foo.vm03.smqfat on vm03 2026-03-10T10:00:25.591 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:25 vm03 ceph-mon[123760]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 56/630 objects degraded (8.889%), 15 pgs degraded) 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: pgmap v510: 161 pgs: 18 peering, 143 active+clean; 457 KiB data, 316 MiB used, 160 GiB / 160 GiB avail; 61 KiB/s rd, 151 B/s wr, 92 op/s 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: Upgrade: Updating rgw.foo.vm03.smqfat (3/4) 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: Deploying daemon rgw.foo.vm03.smqfat on vm03 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[118593]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 56/630 objects degraded (8.889%), 15 pgs degraded) 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: pgmap v510: 161 pgs: 18 peering, 143 active+clean; 457 KiB data, 316 MiB used, 160 GiB / 160 GiB avail; 61 KiB/s rd, 151 B/s wr, 92 op/s 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: Upgrade: Updating rgw.foo.vm03.smqfat (3/4) 
2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.smqfat", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: Deploying daemon rgw.foo.vm03.smqfat on vm03 2026-03-10T10:00:25.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:25 vm00 ceph-mon[121907]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 56/630 objects degraded (8.889%), 15 pgs degraded) 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: pgmap v511: 161 pgs: 161 active+clean; 457 KiB data, 320 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 127 B/s wr, 135 op/s 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: Upgrade: Updating rgw.smpl.vm03.evsibt (4/4) 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:27.282 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:27 vm03 ceph-mon[123760]: Deploying daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T10:00:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:26.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:26.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: pgmap v511: 161 pgs: 161 active+clean; 457 KiB data, 320 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 127 B/s wr, 135 op/s 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: Upgrade: Updating rgw.smpl.vm03.evsibt (4/4) 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[118593]: Deploying daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: pgmap v511: 161 pgs: 161 active+clean; 457 KiB data, 320 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 127 B/s wr, 135 op/s 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: Upgrade: Updating rgw.smpl.vm03.evsibt (4/4) 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm03.evsibt", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:27.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:27 vm00 ceph-mon[121907]: Deploying daemon rgw.smpl.vm03.evsibt on vm03 2026-03-10T10:00:28.390 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T10:00:28.872 
INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 15s ago 13m - - 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 12s ago 13m - - 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (8m) 15s ago 12m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (7m) 12s ago 11m 49.1M - dad864ee21e9 011f2081bf92 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (6m) 15s ago 11m 53.2M - 3.5 e1d6a67b021e a19997a050c6 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (5m) 12s ago 13m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (7m) 15s ago 14m 577M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 15s ago 14m 77.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (4m) 12s ago 13m 52.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (4m) 15s ago 13m 56.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (8m) 15s ago 12m 9907k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (7m) 12s ago 12m 9885k - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (3m) 15s ago 13m 78.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (3m) 15s ago 13m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (2m) 15s ago 13m 48.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (101s) 15s ago 12m 75.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (78s) 12s ago 12m 49.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 0e3f0f82bc78 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (57s) 12s ago 12m 47.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c6d3a12c7d3 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (35s) 12s ago 12m 45.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09cb5d413a11 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (13s) 12s ago 12m 14.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6aa4b0e4646a 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (6m) 12s ago 11m 50.7M - 2.51.0 1d3b7f56885b fbf1a95f0e55 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 starting - - - - 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 starting - - - - 2026-03-10T10:00:28.872 
INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 starting - - - - 2026-03-10T10:00:28.872 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 starting - - - - 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 16 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T10:00:29.200 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T10:00:29.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[118593]: pgmap v512: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 165 KiB/s rd, 346 B/s wr, 253 op/s 2026-03-10T10:00:29.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[118593]: from='client.44473 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:29.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[118593]: from='client.34473 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:29.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:29.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:29.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[118593]: from='client.? 
192.168.123.100:0/3435271276' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "osd", 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "mgr" 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "13/23 daemons upgraded", 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading rgw daemons", 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T10:00:29.488 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T10:00:29.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[121907]: pgmap v512: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 165 KiB/s rd, 346 B/s wr, 253 op/s 2026-03-10T10:00:29.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[121907]: from='client.44473 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:29.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[121907]: from='client.34473 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:29.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:29.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:29.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:29 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/3435271276' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:29.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:29 vm03 ceph-mon[123760]: pgmap v512: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 165 KiB/s rd, 346 B/s wr, 253 op/s 2026-03-10T10:00:29.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:29 vm03 ceph-mon[123760]: from='client.44473 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:29.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:29 vm03 ceph-mon[123760]: from='client.34473 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:29.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:29.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:29 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:29.508 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:29 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3435271276' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:29.796 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:00:29.796 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:00:29.796 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[118593]: from='client.34476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[118593]: from='client.54425 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/58489971' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[121907]: from='client.34476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[121907]: from='client.54425 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/58489971' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:00:30.662 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:30.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:30 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:30.666 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:30 vm03 ceph-mon[123760]: from='client.34476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:30.666 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:30 vm03 ceph-mon[123760]: from='client.54425 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:00:30.666 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:30 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/58489971' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:00:30.666 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:30 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:30.666 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:30 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:31.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:00:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:00:30] "GET /metrics HTTP/1.1" 200 38327 "" "Prometheus/2.51.0" 2026-03-10T10:00:31.481 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:31 vm03 ceph-mon[123760]: pgmap v513: 161 pgs: 161 active+clean; 457 KiB data, 332 MiB used, 160 GiB / 160 GiB avail; 197 KiB/s rd, 307 B/s wr, 302 op/s 2026-03-10T10:00:31.481 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:31.481 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:31 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:31 vm00 ceph-mon[118593]: pgmap v513: 161 pgs: 161 active+clean; 457 KiB data, 332 MiB used, 160 GiB / 160 GiB avail; 197 KiB/s rd, 307 B/s wr, 302 op/s 2026-03-10T10:00:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:31.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:31 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:31.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:31 vm00 ceph-mon[121907]: pgmap v513: 161 pgs: 161 active+clean; 457 KiB data, 332 MiB used, 160 GiB / 160 GiB avail; 197 KiB/s rd, 307 B/s wr, 302 op/s 2026-03-10T10:00:31.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:31.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:31 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.192 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: Detected new or changed devices on vm03 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: pgmap v514: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 198 KiB/s rd, 356 B/s wr, 306 op/s 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: pgmap v515: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 221 KiB/s rd, 311 B/s wr, 341 op/s 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: Detected new or changed devices on vm03 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: pgmap v514: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 198 KiB/s rd, 356 B/s wr, 306 op/s 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: pgmap v515: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 221 KiB/s rd, 311 B/s wr, 341 op/s 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:33.193 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:33 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: Detected new or changed devices on vm03 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: pgmap v514: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 198 KiB/s rd, 356 B/s wr, 306 op/s 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: pgmap v515: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 221 KiB/s rd, 311 B/s wr, 341 op/s 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:33.214 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:33 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:34.305 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:34.149Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: pgmap v516: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 233 KiB/s rd, 340 B/s wr, 360 op/s 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: Checking dashboard <-> RGW credentials 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. 
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.571 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: pgmap v516: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 233 KiB/s rd, 340 B/s wr, 360 op/s
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: Checking dashboard <-> RGW credentials
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.572 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:34 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:34.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: pgmap v516: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 233 KiB/s rd, 340 B/s wr, 360 op/s
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: Checking dashboard <-> RGW credentials
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:34.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:34 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:35.656 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: pgmap v517: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 241 KiB/s rd, 411 B/s wr, 374 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: pgmap v518: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 191 KiB/s rd, 187 B/s wr, 298 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: pgmap v519: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 179 KiB/s rd, 285 B/s wr, 279 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: pgmap v520: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 192 KiB/s rd, 0 B/s wr, 298 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: pgmap v521: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 178 KiB/s rd, 0 B/s wr, 276 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: Upgrade: Updating rgw.foo.vm00.gcwrcv (1/2)
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: pgmap v517: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 241 KiB/s rd, 411 B/s wr, 374 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: pgmap v518: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 191 KiB/s rd, 187 B/s wr, 298 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: pgmap v519: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 179 KiB/s rd, 285 B/s wr, 279 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: pgmap v520: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 192 KiB/s rd, 0 B/s wr, 298 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: pgmap v521: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 178 KiB/s rd, 0 B/s wr, 276 op/s
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: Upgrade: Updating rgw.foo.vm00.gcwrcv (1/2)
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.657 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:35 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: pgmap v517: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 241 KiB/s rd, 411 B/s wr, 374 op/s
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: pgmap v518: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 191 KiB/s rd, 187 B/s wr, 298 op/s
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: pgmap v519: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 179 KiB/s rd, 285 B/s wr, 279 op/s
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: pgmap v520: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 192 KiB/s rd, 0 B/s wr, 298 op/s
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: pgmap v521: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 178 KiB/s rd, 0 B/s wr, 276 op/s
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: Upgrade: Updating rgw.foo.vm00.gcwrcv (1/2)
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.gcwrcv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: Deploying daemon rgw.foo.vm00.gcwrcv on vm00
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:35.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:35 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:37.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:36.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:00:37.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:36.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:00:37.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:37 vm03 ceph-mon[123760]: pgmap v522: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 41 KiB/s rd, 0 B/s wr, 61 op/s
2026-03-10T10:00:37.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:37 vm00 ceph-mon[118593]: pgmap v522: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 41 KiB/s rd, 0 B/s wr, 61 op/s
2026-03-10T10:00:37.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:37 vm00 ceph-mon[121907]: pgmap v522: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 41 KiB/s rd, 0 B/s wr, 61 op/s
2026-03-10T10:00:39.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:39 vm03 ceph-mon[123760]: pgmap v523: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 42 op/s
2026-03-10T10:00:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:39 vm00 ceph-mon[118593]: pgmap v523: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 42 op/s
2026-03-10T10:00:39.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:39 vm00 ceph-mon[121907]: pgmap v523: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 42 op/s
2026-03-10T10:00:41.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:00:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:00:40] "GET /metrics HTTP/1.1" 200 38365 "" "Prometheus/2.51.0"
2026-03-10T10:00:41.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:41 vm00 ceph-mon[118593]: pgmap v524: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 28 op/s
2026-03-10T10:00:41.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:41 vm00 ceph-mon[121907]: pgmap v524: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 28 op/s
2026-03-10T10:00:42.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:41 vm03 ceph-mon[123760]: pgmap v524: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 28 op/s
2026-03-10T10:00:43.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:43 vm00 ceph-mon[118593]: pgmap v525: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 3 op/s
2026-03-10T10:00:43.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:43 vm00 ceph-mon[121907]: pgmap v525: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 3 op/s
2026-03-10T10:00:43.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:43 vm03 ceph-mon[123760]: pgmap v525: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 3 op/s
2026-03-10T10:00:44.548 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:44 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:44.149Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:00:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:44 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:44 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:45.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:44 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:45.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:45 vm00 ceph-mon[118593]: pgmap v526: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 2 op/s
2026-03-10T10:00:45.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:45 vm00 ceph-mon[121907]: pgmap v526: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 2 op/s
2026-03-10T10:00:46.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:45 vm03 ceph-mon[123760]: pgmap v526: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 2 op/s
2026-03-10T10:00:47.263 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:46.964Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:00:47.264 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:46.965Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:00:47.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:47 vm03 ceph-mon[123760]: pgmap v527: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 2 op/s
2026-03-10T10:00:47.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:47.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:47.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T10:00:47.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:47.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:47 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:00:47.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[118593]: pgmap v527: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 2 op/s
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[121907]: pgmap v527: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 2 op/s
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm00.tpyqjn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:47.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:47 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:00:48.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:48 vm03 ceph-mon[123760]: Upgrade: Updating rgw.smpl.vm00.tpyqjn (2/2)
2026-03-10T10:00:48.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:48 vm03 ceph-mon[123760]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00
2026-03-10T10:00:48.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:48 vm00 ceph-mon[118593]: Upgrade: Updating rgw.smpl.vm00.tpyqjn (2/2)
2026-03-10T10:00:48.875 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:48 vm00 ceph-mon[118593]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00
2026-03-10T10:00:48.875 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:48 vm00 ceph-mon[121907]: Upgrade: Updating rgw.smpl.vm00.tpyqjn (2/2)
2026-03-10T10:00:48.875 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:48 vm00 ceph-mon[121907]: Deploying daemon rgw.smpl.vm00.tpyqjn on vm00
2026-03-10T10:00:49.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:49 vm03 ceph-mon[123760]: pgmap v528: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 85 B/s wr, 43 op/s
2026-03-10T10:00:49.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:49 vm00 ceph-mon[118593]: pgmap v528: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 85 B/s wr, 43 op/s
2026-03-10T10:00:49.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:49 vm00 ceph-mon[121907]: pgmap v528: 161 pgs: 161 active+clean; 457 KiB data, 336 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 85 B/s wr, 43 op/s
2026-03-10T10:00:51.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:00:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:00:50] "GET /metrics HTTP/1.1" 200 38385 "" "Prometheus/2.51.0"
2026-03-10T10:00:51.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:51 vm00 ceph-mon[118593]: pgmap v529: 161 pgs: 161 active+clean; 457 KiB data, 340 MiB used, 160 GiB / 160 GiB avail; 40 KiB/s rd, 85 B/s wr, 60 op/s
2026-03-10T10:00:51.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:51 vm00 ceph-mon[121907]: pgmap v529: 161 pgs: 161 active+clean; 457 KiB data, 340 MiB used, 160 GiB / 160 GiB avail; 40 KiB/s rd, 85 B/s wr, 60 op/s
2026-03-10T10:00:52.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:51 vm03 ceph-mon[123760]: pgmap v529: 161 pgs: 161 active+clean; 457 KiB data, 340 MiB used, 160 GiB / 160 GiB avail; 40 KiB/s rd, 85 B/s wr, 60 op/s
2026-03-10T10:00:53.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:53 vm00 ceph-mon[118593]: pgmap v530: 161 pgs: 161 active+clean; 457 KiB data, 340 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 85 B/s wr, 84 op/s
2026-03-10T10:00:53.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:53 vm00 ceph-mon[121907]: pgmap v530: 161 pgs: 161 active+clean; 457 KiB data, 340 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 85 B/s wr, 84 op/s
2026-03-10T10:00:54.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:53 vm03 ceph-mon[123760]: pgmap v530: 161 pgs: 161 active+clean; 457 KiB data, 340 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 85 B/s wr, 84 op/s
2026-03-10T10:00:54.665 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:54 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:54.665 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:54 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:54.949 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:54 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:00:55.819 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: pgmap v531: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 85 B/s wr, 84 op/s
2026-03-10T10:00:55.819 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.819 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: pgmap v531: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 85 B/s wr, 84 op/s
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:55.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:55 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: pgmap v531: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 85 B/s wr, 84 op/s
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:55 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: pgmap v532: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 61 KiB/s rd, 94 B/s wr, 93 op/s
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: pgmap v533: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 116 B/s wr, 114 op/s
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: pgmap v534: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 0 B/s wr, 73 op/s
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: pgmap v535: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 59 op/s
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: Checking dashboard <-> RGW credentials
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.gcwrcv"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.gcwrcv"}]': finished
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm03.smqfat"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm03.smqfat"}]': finished
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm00.tpyqjn"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm00.tpyqjn"}]': finished
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm03.evsibt"}]: dispatch
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm03.evsibt"}]': finished
2026-03-10T10:00:56.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: pgmap v532: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 61 KiB/s rd, 94 B/s wr, 93 op/s
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: pgmap v533: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 116 B/s wr, 114 op/s
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: pgmap v534: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 0 B/s wr, 73 op/s
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: pgmap v535: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 59 op/s
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: Checking dashboard <-> RGW credentials
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.gcwrcv"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.gcwrcv"}]': finished
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm03.smqfat"}]: dispatch
2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm03.smqfat"}]': finished
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm00.tpyqjn"}]: dispatch 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm00.tpyqjn"}]': finished 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm03.evsibt"}]: dispatch 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm03.evsibt"}]': finished 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:56.711 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:56.712 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:56.712 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:56.712 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:56 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: pgmap v532: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 61 KiB/s rd, 94 B/s wr, 93 op/s 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: pgmap v533: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 116 B/s wr, 114 op/s 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: pgmap v534: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 0 B/s wr, 73 op/s 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: pgmap v535: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 59 op/s 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: Checking dashboard <-> RGW credentials 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.gcwrcv"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.gcwrcv"}]': finished 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm03.smqfat"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm03.smqfat"}]': finished 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: 
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm00.tpyqjn"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm00.tpyqjn"}]': finished 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm03.evsibt"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm03.evsibt"}]': finished 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:00:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:57.049 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:56 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dqkdwh", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T10:00:57.272 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:56.964Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:57.273 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:00:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:00:56.965Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:00:57.799 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all rgw 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[118593]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[118593]: Upgrade: Updating iscsi.foo.vm00.dqkdwh 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[118593]: Deploying daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all rgw 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[121907]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[121907]: Upgrade: Updating iscsi.foo.vm00.dqkdwh 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:57.799 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:57 vm00 ceph-mon[121907]: Deploying daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T10:00:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:57 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all rgw 2026-03-10T10:00:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:57 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T10:00:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:57 vm03 ceph-mon[123760]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T10:00:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:57 vm03 ceph-mon[123760]: Upgrade: Updating iscsi.foo.vm00.dqkdwh 2026-03-10T10:00:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:57 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:00:58.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:57 vm03 ceph-mon[123760]: Deploying daemon iscsi.foo.vm00.dqkdwh on vm00 2026-03-10T10:00:58.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: pgmap v536: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-10T10:00:58.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: 
from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: pgmap v536: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:58.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:58 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: pgmap v536: 161 pgs: 161 active+clean; 457 KiB data, 344 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:00:59.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:58 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:00.009 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:59 vm00 ceph-mon[118593]: pgmap v537: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 108 KiB/s rd, 249 B/s wr, 164 op/s 2026-03-10T10:01:00.009 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:00:59 vm00 ceph-mon[118593]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T10:01:00.009 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:59 vm00 ceph-mon[121907]: pgmap v537: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 
108 KiB/s rd, 249 B/s wr, 164 op/s 2026-03-10T10:01:00.009 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:00:59 vm00 ceph-mon[121907]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T10:01:00.010 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T10:01:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:59 vm03 ceph-mon[123760]: pgmap v537: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 108 KiB/s rd, 249 B/s wr, 164 op/s 2026-03-10T10:01:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:00:59 vm03 ceph-mon[123760]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 2s ago 14m - - 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 27s ago 14m - - 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (8m) 2s ago 12m 26.0M - 0.25.0 c8568f914cd2 44fbe9462c87 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (8m) 27s ago 12m 49.1M - dad864ee21e9 011f2081bf92 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 unknown 2s ago 12m 53.3M - 3.5 e1d6a67b021e a19997a050c6 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (5m) 27s ago 14m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (7m) 2s ago 14m 581M - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 2s ago 14m 80.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (5m) 27s ago 14m 53.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (5m) 2s ago 14m 58.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (8m) 2s ago 12m 9739k - 1.7.0 72c9c2088986 b0bf12b366fd 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (8m) 27s ago 12m 9.77M - 1.7.0 72c9c2088986 4e046233188d 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 2s ago 13m 82.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (3m) 2s ago 13m 58.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (2m) 2s ago 13m 52.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (2m) 2s ago 13m 82.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (110s) 27s ago 13m 53.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 0e3f0f82bc78 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (88s) 27s ago 13m 
50.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c6d3a12c7d3 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (66s) 27s ago 13m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09cb5d413a11 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (45s) 27s ago 12m 71.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6aa4b0e4646a 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (6m) 27s ago 12m 50.6M - 2.51.0 1d3b7f56885b fbf1a95f0e55 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (14s) 2s ago 12m 98.3M - 19.2.3-678-ge911bdeb 654f31e6858e b0a44018a79c 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (34s) 27s ago 12m 99.0M - 19.2.3-678-ge911bdeb 654f31e6858e 06eb70f7ddbe 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (5s) 2s ago 12m 97.9M - 19.2.3-678-ge911bdeb 654f31e6858e 866b08fb2642 2026-03-10T10:01:00.384 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (31s) 27s ago 12m 99.1M - 19.2.3-678-ge911bdeb 654f31e6858e b35f07de50cd 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 18 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T10:01:00.616 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T10:01:00.664 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:01:00] "GET /metrics HTTP/1.1" 200 38385 "" "Prometheus/2.51.0" 2026-03-10T10:01:00.665 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:00 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2557040720' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:01:00.665 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:00 vm00 ceph-mon[118593]: from='client.? 
192.168.123.100:0/2557040720' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "osd", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "mgr", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "rgw" 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "17/23 daemons upgraded", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading iscsi daemons", 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T10:01:00.829 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T10:01:01.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:00 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2557040720' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:01:01.059 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; 1 failed cephadm daemon(s) 2026-03-10T10:01:01.059 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:01:01.059 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T10:01:01.059 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T10:01:01.059 INFO:teuthology.orchestra.run.vm00.stdout: daemon iscsi.foo.vm00.dqkdwh on vm00 is in unknown state 2026-03-10T10:01:02.000 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:01 vm03 ceph-mon[123760]: from='client.44602 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.000 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:01 vm03 ceph-mon[123760]: pgmap v538: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 112 KiB/s rd, 193 B/s wr, 170 op/s 2026-03-10T10:01:02.000 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:01 vm03 ceph-mon[123760]: from='client.34593 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.000 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:01 vm03 ceph-mon[123760]: from='client.44611 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.000 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:01 vm03 ceph-mon[123760]: from='client.? 
192.168.123.100:0/4060770781' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[118593]: from='client.44602 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[118593]: pgmap v538: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 112 KiB/s rd, 193 B/s wr, 170 op/s 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[118593]: from='client.34593 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[118593]: from='client.44611 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/4060770781' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[121907]: from='client.44602 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[121907]: pgmap v538: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 112 KiB/s rd, 193 B/s wr, 170 op/s 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[121907]: from='client.34593 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[121907]: from='client.44611 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:02.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:01 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/4060770781' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T10:01:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:02 vm03 ceph-mon[123760]: from='client.44620 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:01:03.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:02 vm00 ceph-mon[118593]: from='client.44620 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:03.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:03.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:01:03.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:02 vm00 ceph-mon[121907]: from='client.44620 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:03.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:03.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:01:04.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:03 vm03 ceph-mon[123760]: pgmap v539: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 140 B/s wr, 142 op/s 2026-03-10T10:01:04.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:03 vm00 ceph-mon[118593]: pgmap v539: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 140 B/s wr, 142 op/s 2026-03-10T10:01:04.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:03 vm00 ceph-mon[121907]: pgmap v539: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 140 B/s wr, 142 op/s 2026-03-10T10:01:05.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:04 vm03 ceph-mon[123760]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:05.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:04 vm00 ceph-mon[118593]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:05.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:04 vm00 ceph-mon[121907]: from='client.25177 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:06.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:05 vm03 ceph-mon[123760]: pgmap v540: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 111 B/s wr, 113 op/s 
2026-03-10T10:01:06.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:05 vm00 ceph-mon[118593]: pgmap v540: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 111 B/s wr, 113 op/s
2026-03-10T10:01:06.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:05 vm00 ceph-mon[121907]: pgmap v540: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 111 B/s wr, 113 op/s
2026-03-10T10:01:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:06.965Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:01:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:06.966Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:01:08.017 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:07 vm00 ceph-mon[118593]: pgmap v541: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 62 KiB/s rd, 91 B/s wr, 93 op/s
2026-03-10T10:01:08.017 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:07 vm00 ceph-mon[121907]: pgmap v541: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 62 KiB/s rd, 91 B/s wr, 93 op/s
2026-03-10T10:01:08.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:07 vm03 ceph-mon[123760]: pgmap v541: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 62 KiB/s rd, 91 B/s wr, 93 op/s
2026-03-10T10:01:08.546 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: 2026-03-10T10:01:08.366+0000 7f82033da640 -1 log_channel(cephadm) log [ERR] : Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v542: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 50 KiB/s rd, 91 B/s wr, 75 op/s
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v543: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 35 op/s
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v544: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 16 op/s
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v545: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 823 B/s rd, 0 op/s
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v546: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v547: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: pgmap v548: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.777 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:09 vm03 ceph-mon[123760]: Health check failed: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed. (UPGRADE_REDEPLOY_DAEMON)
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v542: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 50 KiB/s rd, 91 B/s wr, 75 op/s
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v543: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 35 op/s
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:01:09.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v544: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 16 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v545: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 823 B/s rd, 0 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v546: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v547: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: pgmap v548: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[118593]: Health check failed: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed. (UPGRADE_REDEPLOY_DAEMON)
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v542: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 50 KiB/s rd, 91 B/s wr, 75 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v543: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 35 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v544: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 16 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v545: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 823 B/s rd, 0 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v546: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v547: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: pgmap v548: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:09.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:09 vm00 ceph-mon[121907]: Health check failed: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed. (UPGRADE_REDEPLOY_DAEMON)
2026-03-10T10:01:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:01:10] "GET /metrics HTTP/1.1" 200 38400 "" "Prometheus/2.51.0"
2026-03-10T10:01:11.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:11 vm03 ceph-mon[123760]: pgmap v549: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T10:01:11.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:11 vm00 ceph-mon[118593]: pgmap v549: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T10:01:11.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:11 vm00 ceph-mon[121907]: pgmap v549: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s
2026-03-10T10:01:13.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:13 vm03 ceph-mon[123760]: pgmap v550: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 750 B/s rd, 0 op/s
2026-03-10T10:01:13.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:13 vm00 ceph-mon[118593]: pgmap v550: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 750 B/s rd, 0 op/s
2026-03-10T10:01:13.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:13 vm00 ceph-mon[121907]: pgmap v550: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 750 B/s rd, 0 op/s
2026-03-10T10:01:15.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:15 vm03 ceph-mon[123760]: pgmap v551: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 504 B/s rd, 0 op/s
2026-03-10T10:01:15.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:15 vm00 ceph-mon[118593]: pgmap v551: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 504 B/s rd, 0 op/s
2026-03-10T10:01:15.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:15 vm00 ceph-mon[121907]: pgmap v551: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 504 B/s rd, 0 op/s
2026-03-10T10:01:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:16.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:01:17.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:16.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:01:17.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:17 vm00 ceph-mon[118593]: pgmap v552: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 379 B/s rd, 0 op/s
2026-03-10T10:01:17.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:17.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:01:17.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:17 vm00 ceph-mon[121907]: pgmap v552: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 379 B/s rd, 0 op/s
2026-03-10T10:01:17.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:17.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:17 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:01:17.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:17 vm03 ceph-mon[123760]: pgmap v552: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 379 B/s rd, 0 op/s
2026-03-10T10:01:17.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:17.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:17 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:01:19.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: pgmap v553: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 310 B/s rd, 0 op/s
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: pgmap v554: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 0 op/s
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: pgmap v555: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2592564155' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/812770572' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]: dispatch
2026-03-10T10:01:19.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:19 vm03 ceph-mon[123760]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]: dispatch
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: pgmap v553: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 310 B/s rd, 0 op/s
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: pgmap v554: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 0 op/s
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: pgmap v555: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/2592564155' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/812770572' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]: dispatch
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[118593]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]: dispatch
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: pgmap v553: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 310 B/s rd, 0 op/s
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: pgmap v554: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 0 op/s
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: pgmap v555: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2592564155' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/812770572' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]: dispatch
2026-03-10T10:01:19.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:19 vm00 ceph-mon[121907]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]: dispatch
2026-03-10T10:01:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:20 vm00 ceph-mon[118593]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]': finished
2026-03-10T10:01:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:20 vm00 ceph-mon[118593]: osdmap e157: 8 total, 8 up, 8 in
2026-03-10T10:01:20.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:20 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/2782647233' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1570002642"}]: dispatch
2026-03-10T10:01:20.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:20 vm00 ceph-mon[121907]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]': finished
2026-03-10T10:01:20.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:20 vm00 ceph-mon[121907]: osdmap e157: 8 total, 8 up, 8 in
2026-03-10T10:01:20.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:20 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2782647233' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1570002642"}]: dispatch
2026-03-10T10:01:20.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:20 vm03 ceph-mon[123760]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3361974250"}]': finished
2026-03-10T10:01:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:20 vm03 ceph-mon[123760]: osdmap e157: 8 total, 8 up, 8 in
2026-03-10T10:01:20.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:20 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2782647233' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1570002642"}]: dispatch
2026-03-10T10:01:21.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:01:20] "GET /metrics HTTP/1.1" 200 38480 "" "Prometheus/2.51.0"
2026-03-10T10:01:21.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:21 vm03 ceph-mon[123760]: pgmap v557: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 346 B/s rd, 0 op/s
2026-03-10T10:01:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:21 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2782647233' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1570002642"}]': finished
2026-03-10T10:01:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:21 vm03 ceph-mon[123760]: osdmap e158: 8 total, 8 up, 8 in
2026-03-10T10:01:21.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:21 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/479950843' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1570002642"}]: dispatch
2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[118593]: pgmap v557: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 346 B/s rd, 0 op/s
2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/2782647233' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1570002642"}]': finished
2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[118593]: osdmap e158: 8 total, 8 up, 8 in
2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[118593]: from='client.?
192.168.123.100:0/479950843' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1570002642"}]: dispatch 2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[121907]: pgmap v557: 161 pgs: 161 active+clean; 457 KiB data, 328 MiB used, 160 GiB / 160 GiB avail; 346 B/s rd, 0 op/s 2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2782647233' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1570002642"}]': finished 2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[121907]: osdmap e158: 8 total, 8 up, 8 in 2026-03-10T10:01:21.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:21 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/479950843' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1570002642"}]: dispatch 2026-03-10T10:01:22.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:22 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/479950843' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1570002642"}]': finished 2026-03-10T10:01:22.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:22 vm03 ceph-mon[123760]: osdmap e159: 8 total, 8 up, 8 in 2026-03-10T10:01:22.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:22 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3304343415' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3739011504"}]: dispatch 2026-03-10T10:01:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:22 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/479950843' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1570002642"}]': finished 2026-03-10T10:01:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:22 vm00 ceph-mon[118593]: osdmap e159: 8 total, 8 up, 8 in 2026-03-10T10:01:22.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:22 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3304343415' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3739011504"}]: dispatch 2026-03-10T10:01:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:22 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/479950843' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1570002642"}]': finished 2026-03-10T10:01:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:22 vm00 ceph-mon[121907]: osdmap e159: 8 total, 8 up, 8 in 2026-03-10T10:01:22.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:22 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/3304343415' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3739011504"}]: dispatch 2026-03-10T10:01:23.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:23 vm03 ceph-mon[123760]: pgmap v560: 161 pgs: 161 active+clean; 457 KiB data, 329 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T10:01:23.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:23 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3304343415' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3739011504"}]': finished 2026-03-10T10:01:23.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:23 vm03 ceph-mon[123760]: osdmap e160: 8 total, 8 up, 8 in 2026-03-10T10:01:23.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:23 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/429980021' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]: dispatch 2026-03-10T10:01:23.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:23 vm03 ceph-mon[123760]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]: dispatch 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[118593]: pgmap v560: 161 pgs: 161 active+clean; 457 KiB data, 329 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3304343415' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3739011504"}]': finished 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[118593]: osdmap e160: 8 total, 8 up, 8 in 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/429980021' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]: dispatch 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[118593]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]: dispatch 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[121907]: pgmap v560: 161 pgs: 161 active+clean; 457 KiB data, 329 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3304343415' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3739011504"}]': finished 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[121907]: osdmap e160: 8 total, 8 up, 8 in 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/429980021' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]: dispatch 2026-03-10T10:01:23.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:23 vm00 ceph-mon[121907]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]: dispatch 2026-03-10T10:01:24.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:24 vm03 ceph-mon[123760]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]': finished 2026-03-10T10:01:24.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:24 vm03 ceph-mon[123760]: osdmap e161: 8 total, 8 up, 8 in 2026-03-10T10:01:24.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:24 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3534269927' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]: dispatch 2026-03-10T10:01:24.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:24 vm03 ceph-mon[123760]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]: dispatch 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[118593]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]': finished 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[118593]: osdmap e161: 8 total, 8 up, 8 in 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3534269927' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]: dispatch 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[118593]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]: dispatch 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[121907]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3306559358"}]': finished 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[121907]: osdmap e161: 8 total, 8 up, 8 in 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3534269927' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]: dispatch 2026-03-10T10:01:24.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:24 vm00 ceph-mon[121907]: from='client.? 
' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]: dispatch 2026-03-10T10:01:25.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:25 vm03 ceph-mon[123760]: pgmap v563: 161 pgs: 161 active+clean; 457 KiB data, 329 MiB used, 160 GiB / 160 GiB avail 2026-03-10T10:01:25.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:25 vm03 ceph-mon[123760]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]': finished 2026-03-10T10:01:25.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:25 vm03 ceph-mon[123760]: osdmap e162: 8 total, 8 up, 8 in 2026-03-10T10:01:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:25 vm00 ceph-mon[118593]: pgmap v563: 161 pgs: 161 active+clean; 457 KiB data, 329 MiB used, 160 GiB / 160 GiB avail 2026-03-10T10:01:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:25 vm00 ceph-mon[118593]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]': finished 2026-03-10T10:01:25.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:25 vm00 ceph-mon[118593]: osdmap e162: 8 total, 8 up, 8 in 2026-03-10T10:01:25.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:25 vm00 ceph-mon[121907]: pgmap v563: 161 pgs: 161 active+clean; 457 KiB data, 329 MiB used, 160 GiB / 160 GiB avail 2026-03-10T10:01:25.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:25 vm00 ceph-mon[121907]: from='client.? ' entity='client.iscsi.foo.vm00.dqkdwh' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/823819702"}]': finished 2026-03-10T10:01:25.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:25 vm00 ceph-mon[121907]: osdmap e162: 8 total, 8 up, 8 in 2026-03-10T10:01:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:26.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:26.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:27.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:27 vm03 ceph-mon[123760]: pgmap v565: 161 pgs: 161 active+clean; 457 KiB data, 346 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T10:01:27.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:27 vm00 ceph-mon[118593]: pgmap v565: 161 pgs: 161 active+clean; 457 KiB data, 346 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T10:01:27.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:27 vm00 ceph-mon[121907]: pgmap v565: 161 pgs: 161 active+clean; 457 KiB data, 346 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s 
2026-03-10T10:01:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:29 vm00 ceph-mon[118593]: pgmap v566: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:01:29.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:29 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:01:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:29 vm00 ceph-mon[121907]: pgmap v566: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:01:29.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:29 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:01:29.946 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:29 vm03 ceph-mon[123760]: pgmap v566: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:01:29.946 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:29 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:01:31.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:01:30] "GET /metrics HTTP/1.1" 200 38480 "" "Prometheus/2.51.0"
2026-03-10T10:01:31.282 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T10:01:31.502 INFO:teuthology.orchestra.run.vm00.stdout:"Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed."
2026-03-10T10:01:31.566 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-10T10:01:31.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:31 vm00 ceph-mon[118593]: pgmap v567: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T10:01:31.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:31 vm00 ceph-mon[121907]: pgmap v567: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T10:01:32.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:31 vm03 ceph-mon[123760]: pgmap v567: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 13s ago 14m - -
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 58s ago 14m - -
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (9m) 13s ago 13m - - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (8m) 58s ago 12m 49.1M - dad864ee21e9 011f2081bf92
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 unknown 13s ago 12m - -
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (6m) 58s ago 14m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (8m) 13s ago 15m - - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (6m) 13s ago 15m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (5m) 58s ago 14m 53.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (6m) 13s ago 14m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (9m) 13s ago 13m - - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (9m) 58s ago 13m 9.77M - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 13s ago 14m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (4m) 13s ago 14m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (3m) 13s ago 14m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (2m) 13s ago 14m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (2m) 58s ago 13m 53.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 0e3f0f82bc78
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (2m) 58s ago 13m 50.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c6d3a12c7d3
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (98s) 58s ago 13m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09cb5d413a11
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (77s) 58s ago 13m 71.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6aa4b0e4646a
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (7m) 58s ago 12m 50.6M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (45s) 13s ago 12m - - 19.2.3-678-ge911bdeb 654f31e6858e b0a44018a79c
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (66s) 58s ago 12m 99.0M - 19.2.3-678-ge911bdeb 654f31e6858e 06eb70f7ddbe
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (37s) 13s ago 12m - - 19.2.3-678-ge911bdeb 654f31e6858e 866b08fb2642
2026-03-10T10:01:32.251 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (63s) 58s ago 12m 99.1M - 19.2.3-678-ge911bdeb 654f31e6858e b35f07de50cd
2026-03-10T10:01:32.311 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-10T10:01:32.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:32 vm00 ceph-mon[118593]: from='client.54569 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:32.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:32 vm00 ceph-mon[118593]: from='client.54575 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:32.870 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:01:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:32 vm00 ceph-mon[121907]: from='client.54569 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:32 vm00 ceph-mon[121907]: from='client.54575 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:32.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 18
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T10:01:32.931 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T10:01:32.982 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'echo "wait for servicemap items w/ changing names to refresh"'
2026-03-10T10:01:33.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:32 vm03 ceph-mon[123760]: from='client.54569 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:33.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:32 vm03 ceph-mon[123760]: from='client.54575 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:33.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:32 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:01:33.289 INFO:teuthology.orchestra.run.vm00.stdout:wait for servicemap items w/ changing names to refresh
2026-03-10T10:01:33.330 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 60'
2026-03-10T10:01:33.749 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:33 vm00 ceph-mon[118593]: from='client.44695 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:01:33.749 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:33 vm00 ceph-mon[118593]: pgmap v568: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s
2026-03-10T10:01:33.749 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:33 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3756057876' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
192.168.123.100:0/3756057876' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:01:33.749 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:33 vm00 ceph-mon[121907]: from='client.44695 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:33.749 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:33 vm00 ceph-mon[121907]: pgmap v568: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T10:01:33.749 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:33 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3756057876' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:01:34.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:33 vm03 ceph-mon[123760]: from='client.44695 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:01:34.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:33 vm03 ceph-mon[123760]: pgmap v568: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T10:01:34.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:33 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3756057876' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:01:35.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:34 vm00 ceph-mon[118593]: pgmap v569: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T10:01:35.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:34 vm00 ceph-mon[121907]: pgmap v569: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T10:01:35.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:34 vm03 ceph-mon[123760]: pgmap v569: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T10:01:37.373 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:36.967Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:37.373 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:36.968Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:37.707 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:37 vm00 ceph-mon[118593]: pgmap v570: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T10:01:37.707 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:37 vm00 ceph-mon[121907]: pgmap v570: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T10:01:37.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:37 vm03 ceph-mon[123760]: pgmap v570: 161 
pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: pgmap v571: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: pgmap v572: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: pgmap v573: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 622 B/s rd, 0 op/s 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:01:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: pgmap v571: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: pgmap v572: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 
ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: pgmap v573: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 622 B/s rd, 0 op/s 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: pgmap v571: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: pgmap v572: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: pgmap v573: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 622 B/s rd, 0 op/s 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T10:01:39.870 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T10:01:39.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:40 vm03 ceph-mon[123760]: pgmap v574: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:40 vm03 ceph-mon[123760]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T10:01:40.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:40 vm00 ceph-mon[118593]: pgmap v574: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:40.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:40 vm00 ceph-mon[118593]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T10:01:40.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:40 vm00 ceph-mon[121907]: pgmap v574: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:40.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:40 vm00 ceph-mon[121907]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T10:01:40.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:01:40] "GET /metrics HTTP/1.1" 200 38481 "" "Prometheus/2.51.0" 2026-03-10T10:01:42.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:42 vm03 ceph-mon[123760]: pgmap v575: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T10:01:42.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:42 vm00 ceph-mon[118593]: pgmap v575: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T10:01:42.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:42 vm00 ceph-mon[121907]: pgmap v575: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T10:01:44.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:44 vm03 ceph-mon[123760]: pgmap v576: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:44.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:44 vm00 ceph-mon[118593]: pgmap v576: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:44.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:44 vm00 ceph-mon[121907]: pgmap v576: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:46.869 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:46 vm00 ceph-mon[118593]: pgmap v577: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:46.869 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:46 vm00 ceph-mon[121907]: pgmap v577: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:47.047 
INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:46 vm03 ceph-mon[123760]: pgmap v577: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 791 B/s rd, 0 op/s 2026-03-10T10:01:47.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:46.968Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:47.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:46.969Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:48 vm00 ceph-mon[118593]: pgmap v578: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T10:01:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:48.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:48 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:01:48.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:48 vm00 ceph-mon[121907]: pgmap v578: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T10:01:48.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:48.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:48 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:01:48.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:48 vm03 ceph-mon[123760]: pgmap v578: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T10:01:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' 2026-03-10T10:01:48.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:48 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T10:01:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:50 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:50.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:50 vm00 ceph-mon[118593]: pgmap v579: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 999 B/s rd, 0 op/s 2026-03-10T10:01:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:50 vm00 ceph-mon[121907]: 
from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:50.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:50 vm00 ceph-mon[121907]: pgmap v579: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 999 B/s rd, 0 op/s 2026-03-10T10:01:50.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:50 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T10:01:50.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:50 vm03 ceph-mon[123760]: pgmap v579: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 999 B/s rd, 0 op/s 2026-03-10T10:01:51.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:01:50 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:01:50] "GET /metrics HTTP/1.1" 200 38482 "" "Prometheus/2.51.0" 2026-03-10T10:01:52.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:52 vm03 ceph-mon[123760]: pgmap v580: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T10:01:52.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:52 vm00 ceph-mon[118593]: pgmap v580: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T10:01:52.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:52 vm00 ceph-mon[121907]: pgmap v580: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T10:01:54.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:54 vm03 ceph-mon[123760]: pgmap v581: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:54.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:54 vm00 ceph-mon[118593]: pgmap v581: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:54.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:54 vm00 ceph-mon[121907]: pgmap v581: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:56.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:56 vm03 ceph-mon[123760]: pgmap v582: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:56.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:56 vm00 ceph-mon[118593]: pgmap v582: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:56.619 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:56 vm00 ceph-mon[121907]: pgmap v582: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T10:01:57.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:56.969Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:01:57.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:56 vm00 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:56.970Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:01:58.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:58 vm03 ceph-mon[123760]: pgmap v583: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:01:58.596 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:58 vm00 ceph-mon[118593]: pgmap v583: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:01:58.596 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:58 vm00 ceph-mon[121907]: pgmap v583: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:01:58.869 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:01:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:01:58.846Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: pgmap v584: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:01:59 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: pgmap v584: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: pgmap v584: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:00.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:01:59 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:01.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:00 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:02:00] "GET /metrics HTTP/1.1" 200 38482 "" "Prometheus/2.51.0"
2026-03-10T10:02:03.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:02 vm03 ceph-mon[123760]: pgmap v585: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:03.048 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:02 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:03.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:02 vm00 ceph-mon[118593]: pgmap v585: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:03.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:02 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:03.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:02 vm00 ceph-mon[121907]: pgmap v585: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:03.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:02 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:05.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:04 vm03 ceph-mon[123760]: pgmap v586: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:05.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:04 vm00 ceph-mon[118593]: pgmap v586: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:05.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:04 vm00 ceph-mon[121907]: pgmap v586: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:07.047 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:06 vm03 ceph-mon[123760]: pgmap v587: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:07.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:06 vm00 ceph-mon[118593]: pgmap v587: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:07.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:06.970Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:07.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:06.970Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:07.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:06 vm00 ceph-mon[121907]: pgmap v587: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:09.119 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:08 vm00 ceph-mon[118593]: pgmap v588: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:09.119 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:08 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:08.845Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:09.119 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:08 vm00 ceph-mon[121907]: pgmap v588: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:09.151 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:08 vm03 ceph-mon[123760]: pgmap v588: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
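
[annotation] Every alertmanager entry above is the same failure repeating: the ceph-dashboard webhook receiver posts to https://host.containers.internal:8443/api/prometheus_receiver, but the lookup of host.containers.internal falls through to the VM's resolver at 192.168.123.1, which cannot answer it, so each notification is retried and eventually dropped ("notify retry canceled"). host.containers.internal is an alias podman normally injects into a container's /etc/hosts. A minimal sketch of how one might confirm both halves of that from vm00 (the container name is copied from this log; whether cat is present in the alertmanager image and getent on the host are assumptions):

  # Is the alias present inside the alertmanager container? (expected here: missing)
  sudo podman exec ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a cat /etc/hosts
  # Can the host-side resolver answer it? (expected: no, matching "no such host" above)
  getent hosts host.containers.internal || echo "unresolved, as the log shows"
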
2026-03-10T10:02:10.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:09 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:10.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:09 vm03 ceph-mon[123760]: pgmap v589: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:09 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:10.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:09 vm00 ceph-mon[118593]: pgmap v589: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:09 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:10.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:09 vm00 ceph-mon[121907]: pgmap v589: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:11.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:10 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:02:10] "GET /metrics HTTP/1.1" 200 38480 "" "Prometheus/2.51.0"
2026-03-10T10:02:12.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:11 vm03 ceph-mon[123760]: pgmap v590: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:12.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:11 vm00 ceph-mon[118593]: pgmap v590: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:12.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:11 vm00 ceph-mon[121907]: pgmap v590: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:14.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:13 vm03 ceph-mon[123760]: pgmap v591: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:14.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:13 vm00 ceph-mon[118593]: pgmap v591: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:14.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:13 vm00 ceph-mon[121907]: pgmap v591: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:16.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:15 vm03 ceph-mon[123760]: pgmap v592: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:16.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:15 vm00 ceph-mon[118593]: pgmap v592: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:16.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:15 vm00 ceph-mon[121907]: pgmap v592: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:17.120 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:16.970Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:17.120 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:16.971Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:18.297 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:18 vm03 ceph-mon[123760]: pgmap v593: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:18.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:18 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:17 vm00 ceph-mon[118593]: pgmap v593: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:18.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:17 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:18 vm00 ceph-mon[121907]: pgmap v593: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:18.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:18 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:20 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:20 vm03 ceph-mon[123760]: pgmap v594: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.298 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:20 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[118593]: pgmap v594: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[121907]: pgmap v594: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:20.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:20 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:21.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:20 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:02:20] "GET /metrics HTTP/1.1" 200 38483 "" "Prometheus/2.51.0"
2026-03-10T10:02:22.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:22 vm00 ceph-mon[118593]: pgmap v595: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:22.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:22 vm00 ceph-mon[121907]: pgmap v595: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:22.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:22 vm03 ceph-mon[123760]: pgmap v595: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:24.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:24 vm00 ceph-mon[118593]: pgmap v596: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:24.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:24 vm00 ceph-mon[121907]: pgmap v596: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:24.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:24 vm03 ceph-mon[123760]: pgmap v596: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:26.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:26 vm00 ceph-mon[118593]: pgmap v597: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:26.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:26 vm00 ceph-mon[121907]: pgmap v597: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:26.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:26 vm03 ceph-mon[123760]: pgmap v597: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:26.971Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:26.972Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:28.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:28 vm00 ceph-mon[118593]: pgmap v598: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:28.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:28 vm00 ceph-mon[121907]: pgmap v598: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:28.376 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:28 vm03 ceph-mon[123760]: pgmap v598: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:30 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:30.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:30 vm00 ceph-mon[118593]: pgmap v599: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:30 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:30.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:30 vm00 ceph-mon[121907]: pgmap v599: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:30.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:30 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:30.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:30 vm03 ceph-mon[123760]: pgmap v599: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:31.119 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:30 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:02:30] "GET /metrics HTTP/1.1" 200 38483 "" "Prometheus/2.51.0"
2026-03-10T10:02:32.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:32 vm00 ceph-mon[118593]: pgmap v600: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:32.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:32 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:32.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:32 vm00 ceph-mon[121907]: pgmap v600: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:32.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:32 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:32 vm03 ceph-mon[123760]: pgmap v600: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:32.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:32 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T10:02:33.680 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-10T10:02:34.211 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:34 vm00 ceph-mon[121907]: pgmap v601: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:34.211 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:34 vm00 ceph-mon[118593]: pgmap v601: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm00 vm00 running 15s ago 15m - -
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:agent.vm03 vm03 error 2m ago 15m - -
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (10m) 15s ago 14m - - 0.25.0 c8568f914cd2 44fbe9462c87
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm03 *:3000 running (9m) 2m ago 13m 49.1M - dad864ee21e9 011f2081bf92
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dqkdwh vm00 running (75s) 15s ago 13m - - 3.9 654f31e6858e b99946cc73f8
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm03 *:8443,9283,8765 running (7m) 2m ago 15m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 37927dd47101
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (9m) 15s ago 16m - - 19.2.3-678-ge911bdeb 654f31e6858e a31a53f9eed3
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (7m) 15s ago 16m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 5f537ed367b0
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm03 running (6m) 2m ago 15m 53.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8f6ea28bf391
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (7m) 15s ago 15m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 9fee6887b35b
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (10m) 15s ago 14m - - 1.7.0 72c9c2088986 b0bf12b366fd
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm03 *:9100 running (10m) 2m ago 14m 9.77M - 1.7.0 72c9c2088986 4e046233188d
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (5m) 15s ago 15m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6eea3ec528db
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (5m) 15s ago 15m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 88805c559d1d
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (4m) 15s ago 15m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e be8a08b99e6b
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (3m) 15s ago 15m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5468a49ce587
2026-03-10T10:02:34.211 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm03 running (3m) 2m ago 14m 53.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 0e3f0f82bc78
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm03 running (3m) 2m ago 14m 50.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3c6d3a12c7d3
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm03 running (2m) 2m ago 14m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09cb5d413a11
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm03 running (2m) 2m ago 14m 71.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6aa4b0e4646a
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm03 *:9095 running (8m) 2m ago 14m 50.6M - 2.51.0 1d3b7f56885b fbf1a95f0e55
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.gcwrcv vm00 *:8000 running (107s) 15s ago 13m - - 19.2.3-678-ge911bdeb 654f31e6858e b0a44018a79c
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm03.smqfat vm03 *:8000 running (2m) 2m ago 13m 99.0M - 19.2.3-678-ge911bdeb 654f31e6858e 06eb70f7ddbe
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm00.tpyqjn vm00 *:80 running (99s) 15s ago 13m - - 19.2.3-678-ge911bdeb 654f31e6858e 866b08fb2642
2026-03-10T10:02:34.212 INFO:teuthology.orchestra.run.vm00.stdout:rgw.smpl.vm03.evsibt vm03 *:80 running (2m) 2m ago 13m 99.1M - 19.2.3-678-ge911bdeb 654f31e6858e b35f07de50cd
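
[annotation] In the ceph orch ps table above everything reports running except agent.vm03, which is in error, and every vm03 daemon was last refreshed "2m ago"; that is the same condition the CEPHADM_AGENT_DOWN warning reports further down. A sketch for pulling just the unhealthy daemons out of that table, assuming the JSON fields daemon_type, daemon_id, hostname and status_desc (field names can vary by release):

  ceph orch ps --format json | jq -r '.[] | select(.status_desc != "running") | "\(.daemon_type).\(.daemon_id) on \(.hostname): \(.status_desc)"'
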
"mgr": { 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T10:02:34.868 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T10:02:34.926 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T10:02:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:35 vm00 ceph-mon[118593]: from='client.44704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:02:35.120 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:35 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/1739468094' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:02:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:35 vm00 ceph-mon[121907]: from='client.44704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:02:35.120 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:35 vm00 ceph-mon[121907]: from='client.? 
192.168.123.100:0/1739468094' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "iscsi", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "mgr", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "osd", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "rgw" 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "18/23 daemons upgraded", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.", 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": true 2026-03-10T10:02:35.518 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T10:02:35.547 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:35 vm03 ceph-mon[123760]: from='client.44704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:02:35.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:35 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/1739468094' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T10:02:35.601 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T10:02:36.182 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed. 2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline 2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout: Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline. 2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed. 2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout: Upgrade daemon: iscsi.foo.vm00.dqkdwh: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm00.dqkdwh ... 2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Creating ceph-iscsi config... 
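
[annotation] The upgrade status above shows cephadm parked: in_progress is still true but is_paused is true, 18 of 23 daemons are done, and the recorded error is the iscsi.foo.vm00.dqkdwh redeploy failure that ceph health detail spells out below. Outside this test, the usual manual recovery for a paused upgrade (not something the harness attempts here) would be to fix or redeploy the failing daemon and then un-pause; a sketch, using the daemon name from this log:

  ceph orch daemon redeploy iscsi.foo.vm00.dqkdwh   # retry just the failed daemon
  ceph orch upgrade resume                          # let the upgrade loop continue
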
2026-03-10T10:02:35.601 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail'
2026-03-10T10:02:36.182 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN 1 Cephadm Agent(s) are not reporting. Hosts may be offline; Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] CEPHADM_AGENT_DOWN: 1 Cephadm Agent(s) are not reporting. Hosts may be offline
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:    Cephadm agent on host vm03 has not reported in 60.0 seconds. Agent is assumed down and host may be offline.
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dqkdwh on host vm00 failed.
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:    Upgrade daemon: iscsi.foo.vm00.dqkdwh: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm00.dqkdwh ...
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Creating ceph-iscsi config...
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/iscsi-gateway.cfg
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Write file: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/iscsi.foo.vm00.dqkdwh/tcmu-runner-entrypoint.sh
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Failed to trim old cgroups /sys/fs/cgroup/system.slice/system-ceph\x2de2d4b2ee\x2d1c65\x2d11f1\x2dbae0\x2db525704df8fa.slice/ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 1 from systemctl start ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:Traceback (most recent call last):
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:    return _run_code(code, main_globals, None,
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:    exec(code, run_globals)
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1090, in deploy_daemon
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1237, in deploy_daemon_units
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:  File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:RuntimeError: Failed command: systemctl start ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh: Job for ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service failed because the control process exited with error code.
2026-03-10T10:02:36.185 INFO:teuthology.orchestra.run.vm00.stdout:See "systemctl status ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" and "journalctl -xeu ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@iscsi.foo.vm00.dqkdwh.service" for details.
2026-03-10T10:02:36.268 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 1'"'"''
2026-03-10T10:02:36.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:36 vm00 ceph-mon[118593]: pgmap v602: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:36.369 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:36 vm00 ceph-mon[118593]: from='client.34683 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:36.369 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:36 vm00 ceph-mon[121907]: pgmap v602: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:36.370 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:36 vm00 ceph-mon[121907]: from='client.34683 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:36.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:36 vm03 ceph-mon[123760]: pgmap v602: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T10:02:36.548 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:36 vm03 ceph-mon[123760]: from='client.34683 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:36.910 INFO:teuthology.orchestra.run.vm00.stdout:true
2026-03-10T10:02:37.120 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:36.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:37.120 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:36.973Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T10:02:37.366 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | keys'"'"' | grep $sha1'
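
[annotation] The two DEBUG command lines above look mangled but are correct shell: teuthology wraps the remote command in single quotes, so each embedded single quote is written with the '"'"' idiom (close the quote, emit a double-quoted quote, reopen). After the remote shell strips that layer, what actually runs is:

  ceph versions | jq -e '.overall | length == 1'         # exit 0 only if all daemons report one version (prints: true)
  ceph versions | jq -e '.overall | keys' | grep $sha1   # assert the running build matches the target sha1

jq's -e flag maps the last output value onto the exit status (false/null become nonzero), which is what lets these one-liners double as pass/fail assertions for the harness.
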
2026-03-10T10:02:37.590 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:37 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/3485927061' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T10:02:37.590 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:37 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/2155900932' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:02:37.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:37 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/3485927061' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T10:02:37.590 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:37 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/2155900932' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:02:37.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:37 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/3485927061' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T10:02:37.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:37 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/2155900932' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:02:38.010 INFO:teuthology.orchestra.run.vm00.stdout:  "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)"
2026-03-10T10:02:38.055 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls | grep '"'"'^osd '"'"''
2026-03-10T10:02:38.618 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:38 vm00 ceph-mon[118593]: pgmap v603: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:38.618 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:38 vm00 ceph-mon[118593]: from='client.? 192.168.123.100:0/427896810' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:02:38.618 INFO:teuthology.orchestra.run.vm00.stdout:osd 8 2m ago -
2026-03-10T10:02:38.618 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:38 vm00 ceph-mon[121907]: pgmap v603: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:38.618 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:38 vm00 ceph-mon[121907]: from='client.? 192.168.123.100:0/427896810' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:02:38.764 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-10T10:02:38.767 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local
2026-03-10T10:02:38.767 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- bash -c 'ceph orch upgrade ls'
2026-03-10T10:02:38.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:38 vm03 ceph-mon[123760]: pgmap v603: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T10:02:38.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:38 vm03 ceph-mon[123760]: from='client.? 192.168.123.100:0/427896810' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='client.44728 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.619 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[118593]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='client.44728 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.620 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:39 vm00 ceph-mon[121907]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='client.44728 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:39.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:39 vm03 ceph-mon[123760]: from='mgr.25264 192.168.123.100:0/2549796273' entity='mgr.y'
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[118593]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[118593]: pgmap v604: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[118593]: pgmap v605: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[118593]: from='client.54614 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[121907]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[121907]: pgmap v604: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[121907]: pgmap v605: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:40.663 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:40 vm00 ceph-mon[121907]: from='client.54614 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:40.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:40 vm03 ceph-mon[123760]: from='client.44638 -' entity='client.iscsi.foo.vm00.dqkdwh' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T10:02:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:40 vm03 ceph-mon[123760]: pgmap v604: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s
2026-03-10T10:02:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:40 vm03 ceph-mon[123760]: pgmap v605: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:40.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:40 vm03 ceph-mon[123760]: from='client.54614 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:    "image": "quay.io/ceph/ceph",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:    "registry": "quay.io",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:    "bare_image": "ceph/ceph",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:    "versions": [
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "20.2.0",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "20.1.1",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "20.1.0",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "19.2.3",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "19.2.2",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "19.2.1",
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:        "19.2.0"
2026-03-10T10:02:40.820 INFO:teuthology.orchestra.run.vm00.stdout:    ]
2026-03-10T10:02:40.821 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T10:02:40.880 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0'
2026-03-10T10:02:41.027 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:40 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: ::ffff:192.168.123.103 - - [10/Mar/2026:10:02:40] "GET /metrics HTTP/1.1" 200 38484 "" "Prometheus/2.51.0"
2026-03-10T10:02:42.784 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:42 vm00 ceph-mon[121907]: pgmap v606: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:42.784 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:42 vm00 ceph-mon[121907]: from='client.34713 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:42.784 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:42 vm00 ceph-mon[118593]: pgmap v606: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:42.784 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:42 vm00 ceph-mon[118593]: from='client.34713 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:42.785 INFO:teuthology.orchestra.run.vm00.stdout:        "16.2.0",
2026-03-10T10:02:42.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:42 vm03 ceph-mon[123760]: pgmap v606: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:42.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:42 vm03 ceph-mon[123760]: from='client.34713 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T10:02:42.847 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2'
2026-03-10T10:02:44.797 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:44 vm03 ceph-mon[123760]: pgmap v607: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s
2026-03-10T10:02:44.798 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:44 vm03 ceph-mon[123760]: from='client.34719 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch
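
[annotation] The last three checks exercise ceph orch upgrade ls, which asks the registry what is available for an image: the default form returned the filtered version list above (20.2.0 down to 19.2.0), --show-all-versions widens it to every published version (the test greps it for 16.2.0), and --tags returns raw registry tags (grepped for v16.2.2). A sketch of the same queries with jq instead of grep; the top-level versions array is shown in this log, while a top-level tags array for the --tags form is an assumption by analogy:

  ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | jq -r '.versions[]'
  ceph orch upgrade ls --image quay.io/ceph/ceph --tags | jq -r '.tags[]'   # assumed field name
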
["mon-mgr", ""]}]: dispatch 2026-03-10T10:02:44.810 INFO:teuthology.orchestra.run.vm00.stdout: "v16.2.2", 2026-03-10T10:02:44.810 INFO:teuthology.orchestra.run.vm00.stdout: "v16.2.2-20210505", 2026-03-10T10:02:44.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:44 vm00 ceph-mon[118593]: pgmap v607: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s 2026-03-10T10:02:44.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:44 vm00 ceph-mon[118593]: from='client.34719 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:02:44.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:44 vm00 ceph-mon[121907]: pgmap v607: 161 pgs: 161 active+clean; 457 KiB data, 347 MiB used, 160 GiB / 160 GiB avail; 993 B/s rd, 0 op/s 2026-03-10T10:02:44.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:44 vm00 ceph-mon[121907]: from='client.34719 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T10:02:44.849 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-10T10:02:44.851 INFO:tasks.cephadm:Teardown begin 2026-03-10T10:02:44.851 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T10:02:44.875 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T10:02:44.902 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-10T10:02:44.902 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa -- ceph mgr module disable cephadm 2026-03-10T10:02:45.174 INFO:teuthology.orchestra.run.vm00.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-03-10T10:02:45.192 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-10T10:02:45.192 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-10T10:02:45.192 DEBUG:teuthology.orchestra.run.vm00:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T10:02:45.207 DEBUG:teuthology.orchestra.run.vm03:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T10:02:45.222 INFO:tasks.cephadm:Stopping all daemons... 2026-03-10T10:02:45.222 INFO:tasks.cephadm.mon.a:Stopping mon.a... 2026-03-10T10:02:45.222 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a 2026-03-10T10:02:45.341 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 10:02:45 vm00 systemd[1]: Stopping Ceph mon.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:45.495 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.a.service' 2026-03-10T10:02:45.530 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:45.531 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-10T10:02:45.531 INFO:tasks.cephadm.mon.b:Stopping mon.c... 
2026-03-10T10:02:45.531 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c 2026-03-10T10:02:45.594 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:10:02:45] ENGINE Bus STOPPING 2026-03-10T10:02:45.814 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service' 2026-03-10T10:02:45.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:10:02:45] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T10:02:45.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:10:02:45] ENGINE Bus STOPPED 2026-03-10T10:02:45.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:10:02:45] ENGINE Bus STARTING 2026-03-10T10:02:45.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:10:02:45] ENGINE Serving on http://:::9283 2026-03-10T10:02:45.869 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y[95830]: [10/Mar/2026:10:02:45] ENGINE Bus STARTED 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 systemd[1]: Stopping Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c[121903]: 2026-03-10T10:02:45.679+0000 7f39f3b4a640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c[121903]: 2026-03-10T10:02:45.679+0000 7f39f3b4a640 -1 mon.c@1(peon) e4 *** Got Signal Terminated *** 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 podman[167875]: 2026-03-10 10:02:45.735104328 +0000 UTC m=+0.071448176 container died 9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid) 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 podman[167875]: 2026-03-10 10:02:45.752984475 +0000 UTC m=+0.089328323 container remove 
9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid) 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 bash[167875]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service: Deactivated successfully. 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 systemd[1]: Stopped Ceph mon.c for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T10:02:45.870 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 10:02:45 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.c.service: Consumed 6.138s CPU time. 2026-03-10T10:02:45.888 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:45.888 INFO:tasks.cephadm.mon.b:Stopped mon.c 2026-03-10T10:02:45.888 INFO:tasks.cephadm.mon.b:Stopping mon.b... 2026-03-10T10:02:45.888 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.b 2026-03-10T10:02:46.245 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:45 vm03 systemd[1]: Stopping Ceph mon.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:46.245 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b[123756]: 2026-03-10T10:02:46.007+0000 7ff0ea46b640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:02:46.245 INFO:journalctl@ceph.mon.b.vm03.stdout:Mar 10 10:02:46 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-b[123756]: 2026-03-10T10:02:46.007+0000 7ff0ea46b640 -1 mon.b@2(peon) e4 *** Got Signal Terminated *** 2026-03-10T10:02:46.340 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mon.b.service' 2026-03-10T10:02:46.378 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:46.378 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-10T10:02:46.378 INFO:tasks.cephadm.mgr.y:Stopping mgr.y... 
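Each stop produces a pair of podman lifecycle events ("container died" followed by "container remove") that carry the image's full label set, which is what makes these journal lines so long. When skimming them, only the event, container id, image and name usually matter; a small throwaway parser, with the regex written against the lines above rather than any stable podman format:

    import re

    EVENT_RE = re.compile(
        r"podman\[\d+\]: .* container (?P<event>\w+) (?P<cid>[0-9a-f]{12,64})"
        r" \(image=(?P<image>[^,]+), name=(?P<name>[^,)]+)"
    )

    line = ("Mar 10 10:02:45 vm00 podman[167875]: 2026-03-10 10:02:45.735104328 "
            "+0000 UTC m=+0.071448176 container died "
            "9fee6887b35b5bb480921935cea59236b4d2a06f1c198313ecbf7584b50bbee7 "
            "(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, "
            "name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c, ceph=True)")

    m = EVENT_RE.search(line)
    if m:
        # -> died ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mon-c 9fee6887b35b
        print(m["event"], m["name"], m["cid"][:12])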
2026-03-10T10:02:46.378 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y 2026-03-10T10:02:46.652 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service' 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 systemd[1]: Stopping Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 podman[167991]: 2026-03-10 10:02:46.550188564 +0000 UTC m=+0.081074674 container died a31a53f9eed3f2c67abf307bbad9373cbbfcf6d68b2c455713adb4194ad66f4f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2) 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 podman[167991]: 2026-03-10 10:02:46.577457885 +0000 UTC m=+0.108343995 container remove a31a53f9eed3f2c67abf307bbad9373cbbfcf6d68b2c455713adb4194ad66f4f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 bash[167991]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-mgr-y 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Deactivated successfully. 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 systemd[1]: Stopped Ceph mgr.y for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T10:02:46.679 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 10:02:46 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.y.service: Consumed 2min 29.162s CPU time. 2026-03-10T10:02:46.690 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:46.691 INFO:tasks.cephadm.mgr.y:Stopped mgr.y 2026-03-10T10:02:46.691 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 
2026-03-10T10:02:46.691 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x 2026-03-10T10:02:46.798 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 10 10:02:46 vm03 systemd[1]: Stopping Ceph mgr.x for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:46.954 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@mgr.x.service' 2026-03-10T10:02:46.991 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:46.991 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-10T10:02:46.991 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-10T10:02:46.991 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0 2026-03-10T10:02:47.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:46.973Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:02:47.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:46 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:46.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:02:47.370 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:47 vm00 systemd[1]: Stopping Ceph osd.0 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
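The alertmanager errors interleaved here are unrelated to the teardown itself: the dashboard webhook receiver points at host.containers.internal, a name podman normally makes resolvable inside containers, but here the lookup falls through to the VM's resolver at 192.168.123.1, which has no answer for it, so every notify attempt fails identically. The failing lookup is easy to reproduce outside the container:

    import socket

    try:
        socket.getaddrinfo("host.containers.internal", 8443)
    except socket.gaierror as exc:
        # mirrors alertmanager's "dial tcp: lookup ... no such host"
        print("no such host:", exc)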
2026-03-10T10:02:47.370 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[136489]: 2026-03-10T10:02:47.158+0000 7f49d9587640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:02:47.370 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[136489]: 2026-03-10T10:02:47.158+0000 7f49d9587640 -1 osd.0 162 *** Got signal Terminated *** 2026-03-10T10:02:47.370 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:47 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0[136489]: 2026-03-10T10:02:47.158+0000 7f49d9587640 -1 osd.0 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:02:52.459 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 podman[168096]: 2026-03-10 10:02:52.185770062 +0000 UTC m=+5.096502350 container died 6eea3ec528dbcaeb5c48e048d70ed59c92090930ab2fe3593f873227c43de518 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-10T10:02:52.460 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 podman[168096]: 2026-03-10 10:02:52.214217578 +0000 UTC m=+5.124949866 container remove 6eea3ec528dbcaeb5c48e048d70ed59c92090930ab2fe3593f873227c43de518 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-10T10:02:52.460 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 bash[168096]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0 2026-03-10T10:02:52.460 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 podman[168164]: 2026-03-10 10:02:52.368458221 +0000 UTC m=+0.016757006 container create 3dbdeabb34b332a15ca7a8617fd9c821285b5c423e10c6e03520e9afd9b4b368 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS) 2026-03-10T10:02:52.460 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 podman[168164]: 2026-03-10 10:02:52.409651986 +0000 UTC m=+0.057950771 container init 3dbdeabb34b332a15ca7a8617fd9c821285b5c423e10c6e03520e9afd9b4b368 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3) 2026-03-10T10:02:52.460 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 podman[168164]: 2026-03-10 10:02:52.412810528 +0000 UTC m=+0.061109313 container start 3dbdeabb34b332a15ca7a8617fd9c821285b5c423e10c6e03520e9afd9b4b368 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True) 2026-03-10T10:02:52.460 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 10:02:52 vm00 podman[168164]: 2026-03-10 10:02:52.413727434 +0000 UTC m=+0.062026219 container attach 3dbdeabb34b332a15ca7a8617fd9c821285b5c423e10c6e03520e9afd9b4b368 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-0-deactivate, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, 
org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T10:02:52.636 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.0.service' 2026-03-10T10:02:52.674 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:52.674 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-10T10:02:52.674 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-10T10:02:52.674 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1 2026-03-10T10:02:53.119 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:52 vm00 systemd[1]: Stopping Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:53.119 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[142865]: 2026-03-10T10:02:52.848+0000 7fa8f6634640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:02:53.119 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[142865]: 2026-03-10T10:02:52.848+0000 7fa8f6634640 -1 osd.1 162 *** Got signal Terminated *** 2026-03-10T10:02:53.119 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:52 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1[142865]: 2026-03-10T10:02:52.848+0000 7fa8f6634640 -1 osd.1 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:02:57.321 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:56.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:02:57.321 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:02:56 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:02:56.976Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:02:58.128 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:57 vm00 podman[168261]: 2026-03-10 10:02:57.876194233 +0000 UTC m=+5.045220623 container died 88805c559d1d70cbe551b17205fb1afea9ffba59937bb65bcb4dc2d935128aa9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True) 2026-03-10T10:02:58.128 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:57 vm00 podman[168261]: 2026-03-10 10:02:57.909509561 +0000 UTC m=+5.078535951 container remove 88805c559d1d70cbe551b17205fb1afea9ffba59937bb65bcb4dc2d935128aa9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-10T10:02:58.128 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:57 vm00 bash[168261]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1 2026-03-10T10:02:58.129 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.08313087 +0000 UTC m=+0.023145364 container create 0c9443a8b817ddce54cb9d02896839bc25e2eccef2a40154f30dafd0c01d6ad2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T10:02:58.380 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service' 2026-03-10T10:02:58.406 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.132317946 +0000 UTC m=+0.072332431 container init 0c9443a8b817ddce54cb9d02896839bc25e2eccef2a40154f30dafd0c01d6ad2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.135647758 +0000 UTC m=+0.075662252 container start 
0c9443a8b817ddce54cb9d02896839bc25e2eccef2a40154f30dafd0c01d6ad2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0) 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.136614999 +0000 UTC m=+0.076629493 container attach 0c9443a8b817ddce54cb9d02896839bc25e2eccef2a40154f30dafd0c01d6ad2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.074944316 +0000 UTC m=+0.014958811 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.273633575 +0000 UTC m=+0.213648069 container died 0c9443a8b817ddce54cb9d02896839bc25e2eccef2a40154f30dafd0c01d6ad2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, ceph=True, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 podman[168428]: 2026-03-10 10:02:58.366654488 +0000 UTC m=+0.306668982 container remove 0c9443a8b817ddce54cb9d02896839bc25e2eccef2a40154f30dafd0c01d6ad2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-1-deactivate, 
org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3) 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Deactivated successfully. 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 systemd[1]: Stopped Ceph osd.1 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T10:02:58.407 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 10:02:58 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.1.service: Consumed 4.171s CPU time. 2026-03-10T10:02:58.419 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:02:58.419 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-10T10:02:58.419 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-10T10:02:58.419 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2 2026-03-10T10:02:58.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:02:58 vm00 systemd[1]: Stopping Ceph osd.2 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:02:58.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:02:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[148163]: 2026-03-10T10:02:58.575+0000 7f28456cd640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:02:58.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:02:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[148163]: 2026-03-10T10:02:58.575+0000 7f28456cd640 -1 osd.2 162 *** Got signal Terminated *** 2026-03-10T10:02:58.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:02:58 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2[148163]: 2026-03-10T10:02:58.575+0000 7f28456cd640 -1 osd.2 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:03:03.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 podman[168560]: 2026-03-10 10:03:03.607173845 +0000 UTC m=+5.047476677 container died be8a08b99e6b10d02a7dfaea5b8b7bd492d136010c1defd27e79e8fccb30ff9e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-10T10:03:03.870 
INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 podman[168560]: 2026-03-10 10:03:03.635231202 +0000 UTC m=+5.075534034 container remove be8a08b99e6b10d02a7dfaea5b8b7bd492d136010c1defd27e79e8fccb30ff9e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223) 2026-03-10T10:03:03.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 bash[168560]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2 2026-03-10T10:03:03.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 podman[169123]: 2026-03-10 10:03:03.796315925 +0000 UTC m=+0.019664628 container create 8ec1a9a3b36a719dd98cfe3da959ed04e3ddb20bf9b7123c8c406e7183956f57 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T10:03:03.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 podman[169123]: 2026-03-10 10:03:03.833328393 +0000 UTC m=+0.056677117 container init 8ec1a9a3b36a719dd98cfe3da959ed04e3ddb20bf9b7123c8c406e7183956f57 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, org.label-schema.build-date=20260223, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True) 2026-03-10T10:03:03.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 podman[169123]: 2026-03-10 10:03:03.837828596 +0000 UTC m=+0.061177311 container start 8ec1a9a3b36a719dd98cfe3da959ed04e3ddb20bf9b7123c8c406e7183956f57 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, ceph=True, OSD_FLAVOR=default, 
org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid) 2026-03-10T10:03:03.870 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 10:03:03 vm00 podman[169123]: 2026-03-10 10:03:03.839012512 +0000 UTC m=+0.062361226 container attach 8ec1a9a3b36a719dd98cfe3da959ed04e3ddb20bf9b7123c8c406e7183956f57 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-2-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T10:03:04.026 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.2.service' 2026-03-10T10:03:04.065 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:03:04.065 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-10T10:03:04.065 INFO:tasks.cephadm.osd.3:Stopping osd.3... 2026-03-10T10:03:04.065 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3 2026-03-10T10:03:04.148 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:04 vm00 systemd[1]: Stopping Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 
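Each OSD logs "Immediate shutdown (osd_fast_shutdown=true)" when it receives SIGTERM: with fast shutdown the daemon exits at once instead of draining in an orderly way, trading a clean shutdown for speed and leaving the monitors to mark it down. That is also why the surviving OSDs on vm03 start logging heartbeat_check failures below. The option is an ordinary config knob; a sketch for inspecting it on a live cluster (it would not work at this point of the teardown, since the admin keyring is already gone):

    import subprocess

    # `ceph config get <who> <option>` prints the effective value, e.g. "true"
    out = subprocess.run(
        ["ceph", "config", "get", "osd", "osd_fast_shutdown"],
        capture_output=True, text=True, check=True,
    )
    print(out.stdout.strip())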
2026-03-10T10:03:04.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[153580]: 2026-03-10T10:03:04.227+0000 7fd2a3800640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:03:04.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[153580]: 2026-03-10T10:03:04.227+0000 7fd2a3800640 -1 osd.3 162 *** Got signal Terminated *** 2026-03-10T10:03:04.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:04 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3[153580]: 2026-03-10T10:03:04.227+0000 7fd2a3800640 -1 osd.3 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:03:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:06.975Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:03:07.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:06 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:06.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:03:09.619 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:09 vm00 podman[169220]: 2026-03-10 10:03:09.258970066 +0000 UTC m=+5.047435370 container died 5468a49ce587f13fa466505447f148d6ad98c8bf9f67d8bf9f7da07a7e82fe61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T10:03:10.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:09.854Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T10:03:10.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:09.856Z caller=refresh.go:90 
level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T10:03:10.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:09.856Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T10:03:10.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:09.856Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T10:03:10.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:09.856Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T10:03:10.299 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:09.856Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T10:03:10.514 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169220]: 2026-03-10 10:03:10.210237484 +0000 UTC m=+5.998702788 container remove 5468a49ce587f13fa466505447f148d6ad98c8bf9f67d8bf9f7da07a7e82fe61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T10:03:10.515 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 bash[169220]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3 2026-03-10T10:03:10.515 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.400277753 +0000 UTC m=+0.055088693 container create 68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T10:03:10.515 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.354375376 +0000 UTC m=+0.009186316 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:03:10.719 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service' 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.534545952 +0000 UTC m=+0.189356892 container init 68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0) 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.537893058 +0000 UTC m=+0.192703998 container start 68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.549876037 +0000 UTC m=+0.204686977 container attach 68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 conmon[169296]: conmon 68c6bfcbb0ef433ee87c : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870.scope/container/memory.events 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.68357479 +0000 UTC m=+0.338385730 container died 68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 podman[169285]: 2026-03-10 10:03:10.70197491 +0000 UTC m=+0.356785850 container remove 68c6bfcbb0ef433ee87c5a5802ce1bdfc82aefbe96668dfe743847b3cf181870 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-3-deactivate, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service: Deactivated successfully. 2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 systemd[1]: Stopped Ceph osd.3 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 
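Two asides on the block above. The prometheus "Unable to refresh target groups" errors come from its http_sd discovery URLs on 192.168.123.100:8765, the service-discovery endpoint served by the cephadm mgr module, which went away when the mgrs were stopped. And after each OSD container exits, the unit's post-stop step runs a short-lived companion container named <daemon>-deactivate, presumably to release the OSD's devices; the conmon "Failed to open cgroups file" line is generally harmless noise for containers that exit this quickly. The monotonic m=+ offsets podman prints make the lifetime easy to read off:

    # lifetime of the osd-3-deactivate container, from the m=+ offsets logged above
    started = 0.192703998  # "container start" offset
    died = 0.338385730     # "container died" offset
    print(f"deactivate container ran for {died - started:.3f}s")  # ~0.146s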
2026-03-10T10:03:10.789 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 10:03:10 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.3.service: Consumed 3.853s CPU time. 2026-03-10T10:03:10.799 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:03:10.799 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-10T10:03:10.799 INFO:tasks.cephadm.osd.4:Stopping osd.4... 2026-03-10T10:03:10.799 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.4 2026-03-10T10:03:11.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:10 vm03 systemd[1]: Stopping Ceph osd.4 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:03:11.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T10:03:10.906+0000 7fdb0d92d640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:03:11.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T10:03:10.906+0000 7fdb0d92d640 -1 osd.4 162 *** Got signal Terminated *** 2026-03-10T10:03:11.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:10 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T10:03:10.906+0000 7fdb0d92d640 -1 osd.4 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:03:14.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:14.042+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:15.032 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:14 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:14.710+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:15.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:15.031+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:15.754 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4[147747]: 2026-03-10T10:03:15.309+0000 7fdb09f46640 -1 osd.4 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:49.270622+0000 front 2026-03-10T10:02:49.270605+0000 (oldest deadline 2026-03-10T10:03:15.170020+0000) 2026-03-10T10:03:16.022 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:15 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:15.752+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:16.022 
INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:15 vm03 podman[164767]: 2026-03-10 10:03:15.938756044 +0000 UTC m=+5.048528740 container died 0e3f0f82bc78bcccc69a9e29e08a090ac315ca5fb375025d1a44ec1514ddb6be (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T10:03:16.023 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:15 vm03 podman[164767]: 2026-03-10 10:03:15.961973016 +0000 UTC m=+5.071745712 container remove 0e3f0f82bc78bcccc69a9e29e08a090ac315ca5fb375025d1a44ec1514ddb6be (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0) 2026-03-10T10:03:16.023 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:15 vm03 bash[164767]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4 2026-03-10T10:03:16.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:16.020+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:16.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:16 vm03 podman[165295]: 2026-03-10 10:03:16.135734329 +0000 UTC m=+0.021682082 container create 7620bd7fd31b85748c5d7fc1a7a667e8432fb9430d90766345118c248267c0dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T10:03:16.298 
INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:16 vm03 podman[165295]: 2026-03-10 10:03:16.209289909 +0000 UTC m=+0.095237673 container init 7620bd7fd31b85748c5d7fc1a7a667e8432fb9430d90766345118c248267c0dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2) 2026-03-10T10:03:16.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:16 vm03 podman[165295]: 2026-03-10 10:03:16.212442501 +0000 UTC m=+0.098390254 container start 7620bd7fd31b85748c5d7fc1a7a667e8432fb9430d90766345118c248267c0dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-10T10:03:16.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:16 vm03 podman[165295]: 2026-03-10 10:03:16.214689828 +0000 UTC m=+0.100637571 container attach 7620bd7fd31b85748c5d7fc1a7a667e8432fb9430d90766345118c248267c0dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-4-deactivate, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-10T10:03:16.298 INFO:journalctl@ceph.osd.4.vm03.stdout:Mar 10 10:03:16 vm03 podman[165295]: 2026-03-10 10:03:16.128283159 +0000 UTC m=+0.014230922 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:03:16.499 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.4.service' 2026-03-10T10:03:16.539 DEBUG:teuthology.orchestra.run:got remote process result: None 
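The heartbeat_check spam from osd.5, osd.6 and osd.7 is the expected symptom of tearing the cluster down host by host: each survivor keeps pinging the back/front heartbeat addresses of OSDs already stopped on vm00 (here osd.0 at 192.168.123.100:6806) and reports the last reply seen on each channel plus the missed deadline. A hypothetical parser for pulling the reporter, peer and deadline out of such a line:

    import re

    HB_RE = re.compile(
        r"osd\.(?P<me>\d+) \d+ heartbeat_check: no reply from (?P<addr>\S+) "
        r"osd\.(?P<peer>\d+) since back \S+ front \S+ "
        r"\(oldest deadline (?P<deadline>[^)]+)\)"
    )

    line = ("osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 "
            "since back 2026-03-10T10:02:48.579493+0000 front "
            "2026-03-10T10:02:48.579423+0000 (oldest deadline "
            "2026-03-10T10:03:13.878930+0000)")

    m = HB_RE.search(line)
    if m:
        print(f"osd.{m['me']} lost contact with osd.{m['peer']} at {m['addr']}; "
              f"oldest missed deadline {m['deadline']}")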
2026-03-10T10:03:16.539 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-10T10:03:16.539 INFO:tasks.cephadm.osd.5:Stopping osd.5... 2026-03-10T10:03:16.539 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5 2026-03-10T10:03:16.620 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:16 vm03 systemd[1]: Stopping Ceph osd.5 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:03:16.993 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:16.700+0000 7fe7027be640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:03:16.993 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:16.700+0000 7fe7027be640 -1 osd.5 162 *** Got signal Terminated *** 2026-03-10T10:03:16.993 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:16.700+0000 7fe7027be640 -1 osd.5 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:03:16.993 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:16.756+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:16.993 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:16.690+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:17.298 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:16 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:16.991+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:17.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:16.976Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:03:17.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:16 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:16.977Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:03:18.016 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:17 vm03 
ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:17.708+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:18.017 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:17 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:17.703+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:18.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:18.015+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:19.037 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:18.738+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:19.038 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:18.738+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:53.897529+0000 front 2026-03-10T10:02:53.897236+0000 (oldest deadline 2026-03-10T10:03:17.997109+0000) 2026-03-10T10:03:19.038 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:18 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:18.667+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:19.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:19.036+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:20.013 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:19.708+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:20.013 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:19.708+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:53.897529+0000 front 2026-03-10T10:02:53.897236+0000 (oldest deadline 2026-03-10T10:03:17.997109+0000) 2026-03-10T10:03:20.013 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:19 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:19.656+0000 
7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:20.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:20 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:20.012+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:20.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:20 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:20.012+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:21.037 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:20 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:20.737+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.397299+0000 front 2026-03-10T10:02:50.397331+0000 (oldest deadline 2026-03-10T10:03:13.896790+0000) 2026-03-10T10:03:21.037 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:20 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5[150274]: 2026-03-10T10:03:20.737+0000 7fe6fedd7640 -1 osd.5 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:53.897529+0000 front 2026-03-10T10:02:53.897236+0000 (oldest deadline 2026-03-10T10:03:17.997109+0000) 2026-03-10T10:03:21.037 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:20 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:20.696+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:21.038 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:20 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:20.696+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:21.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:21 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:21.036+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:21.297 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:21 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:21.036+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:21.954 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:21 vm03 podman[165394]: 2026-03-10 10:03:21.735430806 +0000 UTC m=+5.051763055 container died 3c6d3a12c7d36a8eaa0da0575d975b04b989eaae79133db2d7b7626fc54c2c68 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T10:03:21.954 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:21 vm03 podman[165394]: 2026-03-10 10:03:21.759595885 +0000 UTC m=+5.075928123 container remove 3c6d3a12c7d36a8eaa0da0575d975b04b989eaae79133db2d7b7626fc54c2c68 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T10:03:21.954 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:21 vm03 bash[165394]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5 2026-03-10T10:03:21.954 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:21 vm03 podman[165462]: 2026-03-10 10:03:21.911926546 +0000 UTC m=+0.017862641 container create 1f936a14b71a73bcf574169cd88cb4b84732125efb52acd45734a606d380b585 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS) 2026-03-10T10:03:21.954 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:21 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:21.699+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:21.954 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:21 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:21.699+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 
osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:22.220 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:21 vm03 podman[165462]: 2026-03-10 10:03:21.9532255 +0000 UTC m=+0.059161606 container init 1f936a14b71a73bcf574169cd88cb4b84732125efb52acd45734a606d380b585 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0) 2026-03-10T10:03:22.220 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:21 vm03 podman[165462]: 2026-03-10 10:03:21.959340949 +0000 UTC m=+0.065277044 container start 1f936a14b71a73bcf574169cd88cb4b84732125efb52acd45734a606d380b585 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-10T10:03:22.220 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 podman[165462]: 2026-03-10 10:03:21.903884482 +0000 UTC m=+0.009820587 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:03:22.220 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:22.040+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:22.220 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:22.040+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:22.469 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service' 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 podman[165462]: 2026-03-10 10:03:22.220277408 +0000 UTC m=+0.326213512 container attach 
1f936a14b71a73bcf574169cd88cb4b84732125efb52acd45734a606d380b585 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True) 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 podman[165462]: 2026-03-10 10:03:22.221099576 +0000 UTC m=+0.327035672 container died 1f936a14b71a73bcf574169cd88cb4b84732125efb52acd45734a606d380b585 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 podman[165462]: 2026-03-10 10:03:22.457779005 +0000 UTC m=+0.563715100 container remove 1f936a14b71a73bcf574169cd88cb4b84732125efb52acd45734a606d380b585 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-5-deactivate, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service: Deactivated successfully. 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service: Unit process 165472 (conmon) remains running after unit stopped. 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service: Unit process 165481 (podman) remains running after unit stopped. 
2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 systemd[1]: Stopped Ceph osd.5 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa. 2026-03-10T10:03:22.539 INFO:journalctl@ceph.osd.5.vm03.stdout:Mar 10 10:03:22 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.5.service: Consumed 2.855s CPU time, 132.9M memory peak. 2026-03-10T10:03:22.549 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T10:03:22.549 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-10T10:03:22.549 INFO:tasks.cephadm.osd.6:Stopping osd.6... 2026-03-10T10:03:22.549 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.6 2026-03-10T10:03:22.798 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:22 vm03 systemd[1]: Stopping Ceph osd.6 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:03:22.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:22.669+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:22.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:22.669+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:22.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:22.718+0000 7f28794a5640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:03:22.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:22.718+0000 7f28794a5640 -1 osd.6 162 *** Got signal Terminated *** 2026-03-10T10:03:22.799 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:22 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:22.718+0000 7f28794a5640 -1 osd.6 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:03:23.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:23 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:23.072+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:23.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:23 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:23.072+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:24.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:23 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:23.670+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 
2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:24.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:23 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:23.670+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:24.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:24.051+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:24.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:24.051+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:25.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:24.696+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:25.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:24.696+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:25.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:24 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:24.696+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:03.245654+0000 front 2026-03-10T10:03:03.245898+0000 (oldest deadline 2026-03-10T10:03:24.345375+0000) 2026-03-10T10:03:25.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:25 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:25.070+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:25.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:25 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:25.070+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:26.047 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:25 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:25.698+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 
2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:26.047 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:25 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:25.698+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:26.048 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:25 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:25.698+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:03.245654+0000 front 2026-03-10T10:03:03.245898+0000 (oldest deadline 2026-03-10T10:03:24.345375+0000) 2026-03-10T10:03:26.339 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:26.102+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:26.339 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:26.102+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:26.675 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:26.673+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:26.675 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:26.673+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:26.675 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:26 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:26.673+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:03.245654+0000 front 2026-03-10T10:03:03.245898+0000 (oldest deadline 2026-03-10T10:03:24.345375+0000) 2026-03-10T10:03:27.369 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:26.977Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:03:27.370 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:26 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:26.978Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will 
retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T10:03:27.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:27.121+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:27.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:27.121+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:27.945 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:27.677+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:50.945192+0000 front 2026-03-10T10:02:50.945256+0000 (oldest deadline 2026-03-10T10:03:16.244486+0000) 2026-03-10T10:03:27.945 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:27.677+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:56.245152+0000 front 2026-03-10T10:02:56.245052+0000 (oldest deadline 2026-03-10T10:03:19.744736+0000) 2026-03-10T10:03:27.945 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6[152800]: 2026-03-10T10:03:27.677+0000 7f2875abe640 -1 osd.6 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:03.245654+0000 front 2026-03-10T10:03:03.245898+0000 (oldest deadline 2026-03-10T10:03:24.345375+0000) 2026-03-10T10:03:27.945 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 podman[165558]: 2026-03-10 10:03:27.743618022 +0000 UTC m=+5.061708052 container died 09cb5d413a117bdd5f91db1cfe495f2d4378003cd81461f1d986873cd95ad68e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS) 2026-03-10T10:03:27.945 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 podman[165558]: 2026-03-10 10:03:27.774599261 +0000 UTC m=+5.092689291 container remove 09cb5d413a117bdd5f91db1cfe495f2d4378003cd81461f1d986873cd95ad68e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6, org.opencontainers.image.documentation=https://docs.ceph.com/, 
io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T10:03:27.945 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 bash[165558]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6 2026-03-10T10:03:28.288 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:28 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:28.112+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:28.288 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:28 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:28.112+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:28.289 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:27 vm03 podman[165638]: 2026-03-10 10:03:27.944981426 +0000 UTC m=+0.023052527 container create eaef93ad023e7eefd22c3abe4a3f32509b63bdacc943ab24db9c34fa0641775c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T10:03:28.289 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:28 vm03 podman[165638]: 2026-03-10 10:03:28.001331086 +0000 UTC m=+0.079402207 container init eaef93ad023e7eefd22c3abe4a3f32509b63bdacc943ab24db9c34fa0641775c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T10:03:28.289 
INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:28 vm03 podman[165638]: 2026-03-10 10:03:28.004488708 +0000 UTC m=+0.082559809 container start eaef93ad023e7eefd22c3abe4a3f32509b63bdacc943ab24db9c34fa0641775c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T10:03:28.289 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:28 vm03 podman[165638]: 2026-03-10 10:03:28.010224596 +0000 UTC m=+0.088295707 container attach eaef93ad023e7eefd22c3abe4a3f32509b63bdacc943ab24db9c34fa0641775c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-10T10:03:28.289 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:28 vm03 podman[165638]: 2026-03-10 10:03:27.93436402 +0000 UTC m=+0.012435141 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T10:03:28.289 INFO:journalctl@ceph.osd.6.vm03.stdout:Mar 10 10:03:28 vm03 podman[165657]: 2026-03-10 10:03:28.157628284 +0000 UTC m=+0.010439323 container died eaef93ad023e7eefd22c3abe4a3f32509b63bdacc943ab24db9c34fa0641775c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-6-deactivate, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default) 2026-03-10T10:03:28.310 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.6.service' 2026-03-10T10:03:28.344 DEBUG:teuthology.orchestra.run:got remote process result: None 
2026-03-10T10:03:28.344 INFO:tasks.cephadm.osd.6:Stopped osd.6 2026-03-10T10:03:28.344 INFO:tasks.cephadm.osd.7:Stopping osd.7... 2026-03-10T10:03:28.344 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7 2026-03-10T10:03:28.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:28 vm03 systemd[1]: Stopping Ceph osd.7 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa... 2026-03-10T10:03:28.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:28 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:28.485+0000 7f17d6c01640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T10:03:28.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:28 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:28.485+0000 7f17d6c01640 -1 osd.7 162 *** Got signal Terminated *** 2026-03-10T10:03:28.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:28 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:28.485+0000 7f17d6c01640 -1 osd.7 162 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T10:03:29.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:29.134+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:29.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:29.134+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:29.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:29 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:29.134+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:02.580353+0000 front 2026-03-10T10:03:02.580460+0000 (oldest deadline 2026-03-10T10:03:28.480141+0000) 2026-03-10T10:03:30.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:30.151+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:30.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:30.151+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:30.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:30.151+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:02.580353+0000 front 
2026-03-10T10:03:02.580460+0000 (oldest deadline 2026-03-10T10:03:28.480141+0000) 2026-03-10T10:03:30.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:30 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:30.151+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6830 osd.3 since back 2026-03-10T10:03:08.480834+0000 front 2026-03-10T10:03:08.480839+0000 (oldest deadline 2026-03-10T10:03:29.580495+0000) 2026-03-10T10:03:31.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:31.147+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:31.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:31.147+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:31.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:31.147+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:02.580353+0000 front 2026-03-10T10:03:02.580460+0000 (oldest deadline 2026-03-10T10:03:28.480141+0000) 2026-03-10T10:03:31.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:31 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:31.147+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6830 osd.3 since back 2026-03-10T10:03:08.480834+0000 front 2026-03-10T10:03:08.480839+0000 (oldest deadline 2026-03-10T10:03:29.580495+0000) 2026-03-10T10:03:32.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:32.166+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:32.547 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:32.166+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:32.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:32.166+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:02.580353+0000 front 2026-03-10T10:03:02.580460+0000 (oldest deadline 2026-03-10T10:03:28.480141+0000) 2026-03-10T10:03:32.548 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:32 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:32.166+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6830 osd.3 since back 2026-03-10T10:03:08.480834+0000 front 2026-03-10T10:03:08.480839+0000 (oldest deadline 2026-03-10T10:03:29.580495+0000) 
2026-03-10T10:03:33.450 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:33.128+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T10:02:48.579493+0000 front 2026-03-10T10:02:48.579423+0000 (oldest deadline 2026-03-10T10:03:13.878930+0000) 2026-03-10T10:03:33.450 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:33.128+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T10:02:54.380045+0000 front 2026-03-10T10:02:54.379941+0000 (oldest deadline 2026-03-10T10:03:19.679574+0000) 2026-03-10T10:03:33.451 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:33.128+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T10:03:02.580353+0000 front 2026-03-10T10:03:02.580460+0000 (oldest deadline 2026-03-10T10:03:28.480141+0000) 2026-03-10T10:03:33.451 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7[155333]: 2026-03-10T10:03:33.128+0000 7f17d2a19640 -1 osd.7 162 heartbeat_check: no reply from 192.168.123.100:6830 osd.3 since back 2026-03-10T10:03:08.480834+0000 front 2026-03-10T10:03:08.480839+0000 (oldest deadline 2026-03-10T10:03:29.580495+0000) 2026-03-10T10:03:33.797 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[165736]: 2026-03-10 10:03:33.511561457 +0000 UTC m=+5.039530894 container died 6aa4b0e4646a80e7620bdf58b26743d1e6493e431a0899e7d9e2f9fe9fd6d817 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T10:03:33.797 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[165736]: 2026-03-10 10:03:33.568941597 +0000 UTC m=+5.096911034 container remove 6aa4b0e4646a80e7620bdf58b26743d1e6493e431a0899e7d9e2f9fe9fd6d817 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 
2026-03-10T10:03:33.797 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 bash[165736]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7
2026-03-10T10:03:34.032 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7.service'
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[166060]: 2026-03-10 10:03:33.796917913 +0000 UTC m=+0.052641770 container create 9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df)
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[166060]: 2026-03-10 10:03:33.847175398 +0000 UTC m=+0.102899255 container init 9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default)
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[166060]: 2026-03-10 10:03:33.850023699 +0000 UTC m=+0.105747556 container start 9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0)
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[166060]: 2026-03-10 10:03:33.756007936 +0000 UTC m=+0.011731793 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[166060]: 2026-03-10 10:03:33.855180553 +0000 UTC m=+0.110904401 container attach 9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 conmon[166081]: conmon 9385ad8ce4da84bc592d : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba.scope/container/memory.events
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:33 vm03 podman[166060]: 2026-03-10 10:03:33.994281609 +0000 UTC m=+0.250005466 container died 9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:34 vm03 podman[166060]: 2026-03-10 10:03:34.014599395 +0000 UTC m=+0.270323252 container remove 9385ad8ce4da84bc592d5a8a06adf31df7db7f1c42ae803ac9918a4eb23b9bba (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-osd-7-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team )
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:34 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7.service: Deactivated successfully.
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:34 vm03 systemd[1]: Stopped Ceph osd.7 for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T10:03:34.049 INFO:journalctl@ceph.osd.7.vm03.stdout:Mar 10 10:03:34 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@osd.7.service: Consumed 2.630s CPU time.
2026-03-10T10:03:34.073 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-10T10:03:34.073 INFO:tasks.cephadm.osd.7:Stopped osd.7
2026-03-10T10:03:34.073 INFO:tasks.cephadm.prometheus.a:Stopping prometheus.a...
2026-03-10T10:03:34.073 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 systemd[1]: Stopping Ceph prometheus.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..."
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..."
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..."
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..."
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped"
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=main.go:1039 level=info msg="Stopping scrape manager..."
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=main.go:984 level=info msg="Scrape discovery manager stopped"
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.249Z caller=main.go:998 level=info msg="Notify discovery manager stopped"
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.250Z caller=main.go:1031 level=info msg="Scrape manager stopped"
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.252Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..."
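Each daemon teardown above follows the same two-step pattern: teuthology first kills the journalctl follower it had streaming that daemon's unit, then asks systemd to stop the cephadm-managed service (podman runs a short-lived "<name>-deactivate" container as part of the stop, visible in the osd.7 records). A minimal sketch of the pattern, with FSID and DAEMON as placeholders for the values visible in the records above:

    FSID=e2d4b2ee-1c65-11f1-bae0-b525704df8fa   # this run's cluster fsid
    DAEMON=osd.7                                # e.g. osd.7, prometheus.a
    # stop the log follower that was attached to the unit
    sudo pkill -f "journalctl -f -n 0 -u ceph-${FSID}@${DAEMON}.service"
    # stop the cephadm-managed systemd unit itself
    sudo systemctl stop "ceph-${FSID}@${DAEMON}"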
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.252Z caller=main.go:1261 level=info msg="Notifier manager stopped"
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a[97767]: ts=2026-03-10T10:03:34.253Z caller=main.go:1273 level=info msg="See you next time!"
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 podman[166255]: 2026-03-10 10:03:34.259996954 +0000 UTC m=+0.031918675 container died fbf1a95f0e5596e8e5da32b49fc8bcd5d6cc5d95f71ea763564bfd18099e7c87 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 podman[166255]: 2026-03-10 10:03:34.295022524 +0000 UTC m=+0.066944254 container remove fbf1a95f0e5596e8e5da32b49fc8bcd5d6cc5d95f71ea763564bfd18099e7c87 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a, maintainer=The Prometheus Authors )
2026-03-10T10:03:34.383 INFO:journalctl@ceph.prometheus.a.vm03.stdout:Mar 10 10:03:34 vm03 bash[166255]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-prometheus-a
2026-03-10T10:03:34.391 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@prometheus.a.service'
2026-03-10T10:03:34.428 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-10T10:03:34.428 INFO:tasks.cephadm.prometheus.a:Stopped prometheus.a
2026-03-10T10:03:34.428 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa --force --keep-logs
2026-03-10T10:03:35.968 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:35 vm00 systemd[1]: Stopping Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a[85064]: ts=2026-03-10T10:03:36.043Z caller=main.go:583 level=info msg="Received SIGTERM, exiting gracefully..."
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 podman[170365]: 2026-03-10 10:03:36.055930687 +0000 UTC m=+0.027509803 container died 44fbe9462c876e42b51d3ab0b10ac0c533098adf52eea628e6e116b60cbc65e9 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 podman[170365]: 2026-03-10 10:03:36.072241848 +0000 UTC m=+0.043820964 container remove 44fbe9462c876e42b51d3ab0b10ac0c533098adf52eea628e6e116b60cbc65e9 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a, maintainer=The Prometheus Authors )
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 podman[170365]: 2026-03-10 10:03:36.073485696 +0000 UTC m=+0.045064812 volume remove 2eccfcbe1cbef02e48c98b04f8824430f105fcbc56d683430cadf6bca73986f7
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 bash[170365]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-alertmanager-a
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@alertmanager.a.service: Deactivated successfully.
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: Stopped Ceph alertmanager.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T10:03:36.318 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@alertmanager.a.service: Consumed 1.357s CPU time.
2026-03-10T10:03:36.576 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: Stopping Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T10:03:36.576 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 podman[170458]: 2026-03-10 10:03:36.404666318 +0000 UTC m=+0.027854698 container died b0bf12b366fd754a8e14dcf8e2a90cd2a4b4abafcd18ef40a9f7f27f380e1545 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T10:03:36.577 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 podman[170458]: 2026-03-10 10:03:36.421414677 +0000 UTC m=+0.044603057 container remove b0bf12b366fd754a8e14dcf8e2a90cd2a4b4abafcd18ef40a9f7f27f380e1545 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a, maintainer=The Prometheus Authors )
2026-03-10T10:03:36.577 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 bash[170458]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-a
2026-03-10T10:03:36.577 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.a.service: Main process exited, code=exited, status=143/n/a
2026-03-10T10:03:36.577 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.a.service: Failed with result 'exit-code'.
2026-03-10T10:03:36.577 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: Stopped Ceph node-exporter.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T10:03:36.577 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 10:03:36 vm00 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.a.service: Consumed 1.393s CPU time.
2026-03-10T10:04:08.106 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa --force --keep-logs
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 systemd[1]: Stopping Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 podman[167109]: 2026-03-10 10:04:09.469580948 +0000 UTC m=+0.014655554 container stop 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 podman[167109]: 2026-03-10 10:04:09.499178851 +0000 UTC m=+0.044253467 container died 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 podman[167109]: 2026-03-10 10:04:09.511359511 +0000 UTC m=+0.056434117 container remove 4e046233188d3617f420463a450aa0dcfa0348675bf974dd16e8a4e84e8c60dc (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b, maintainer=The Prometheus Authors )
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 bash[167109]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-node-exporter-b
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service: Main process exited, code=exited, status=143/n/a
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service: Failed with result 'exit-code'.
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 systemd[1]: Stopped Ceph node-exporter.b for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T10:04:09.658 INFO:journalctl@ceph.node-exporter.b.vm03.stdout:Mar 10 10:04:09 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@node-exporter.b.service: Consumed 1.431s CPU time.
2026-03-10T10:04:10.009 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:09 vm03 systemd[1]: Stopping Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa...
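Note that both node-exporter units end with status=143 and 'Failed with result exit-code', while the OSD and alertmanager units report 'Deactivated successfully': 143 is 128+15, i.e. the exporter simply dies on the SIGTERM it receives rather than catching it, so systemd books an error exit even though the stop is clean. An illustrative one-liner showing the convention:

    bash -c 'kill -TERM $$'; echo $?   # prints 143 (= 128 + SIGTERM)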
2026-03-10T10:04:10.009 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:09 vm03 ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a[74831]: t=2026-03-10T10:04:09+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated"
2026-03-10T10:04:10.009 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:09 vm03 podman[167229]: 2026-03-10 10:04:09.948810364 +0000 UTC m=+0.028031261 container died 011f2081bf92adf5420f7cf8779c02748b19d9c3b3dc6912550d6c6cdd54fcac (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, io.k8s.display-name=Red Hat Universal Base Image 8, description=Ceph Grafana Container, release=236.1648460182, version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, io.buildah.version=1.24.2, vendor=Red Hat, Inc., com.redhat.component=ubi8-container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, architecture=x86_64, build-date=2022-03-28T10:36:18.413762, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel8, maintainer=Paul Cuzner , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, name=ubi8, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, summary=Grafana Container configured for Ceph mgr/dashboard integration)
2026-03-10T10:04:10.009 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:09 vm03 podman[167229]: 2026-03-10 10:04:09.980184192 +0000 UTC m=+0.059405089 container remove 011f2081bf92adf5420f7cf8779c02748b19d9c3b3dc6912550d6c6cdd54fcac (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a, description=Ceph Grafana Container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, architecture=x86_64, com.redhat.component=ubi8-container, io.openshift.expose-services=, io.openshift.tags=base rhel8, vendor=Red Hat, Inc., distribution-scope=public, name=ubi8, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Paul Cuzner , io.buildah.version=1.24.2, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8.5, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-type=git, build-date=2022-03-28T10:36:18.413762)
2026-03-10T10:04:10.009 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:09 vm03 bash[167229]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana-a
2026-03-10T10:04:10.009 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:09 vm03 bash[167249]: Error: no container with name or ID "ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa-grafana.a" found: no such container
2026-03-10T10:04:10.297 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:10 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@grafana.a.service: Deactivated successfully.
2026-03-10T10:04:10.297 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:10 vm03 systemd[1]: Stopped Ceph grafana.a for e2d4b2ee-1c65-11f1-bae0-b525704df8fa.
2026-03-10T10:04:10.297 INFO:journalctl@ceph.grafana.a.vm03.stdout:Mar 10 10:04:10 vm03 systemd[1]: ceph-e2d4b2ee-1c65-11f1-bae0-b525704df8fa@grafana.a.service: Consumed 1.988s CPU time.
2026-03-10T10:04:31.170 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T10:04:31.200 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T10:04:31.236 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-10T10:04:31.236 DEBUG:teuthology.misc:Transferring archived files from vm00:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988/remote/vm00/crash
2026-03-10T10:04:31.236 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/crash -- .
2026-03-10T10:04:31.270 INFO:teuthology.orchestra.run.vm00.stderr:tar: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/crash: Cannot open: No such file or directory
2026-03-10T10:04:31.270 INFO:teuthology.orchestra.run.vm00.stderr:tar: Error is not recoverable: exiting now
2026-03-10T10:04:31.271 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988/remote/vm03/crash
2026-03-10T10:04:31.271 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/crash -- .
2026-03-10T10:04:31.303 INFO:teuthology.orchestra.run.vm03.stderr:tar: /var/lib/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/crash: Cannot open: No such file or directory
2026-03-10T10:04:31.303 INFO:teuthology.orchestra.run.vm03.stderr:tar: Error is not recoverable: exiting now
2026-03-10T10:04:31.304 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-10T10:04:31.304 DEBUG:teuthology.orchestra.run.vm00:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | egrep -v CEPHADM_AGENT_DOWN | head -n 1
2026-03-10T10:04:31.345 INFO:tasks.cephadm:Compressing logs...
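The 'badness' check above is the job's cluster-log gate: keep [ERR]/[WRN]/[SEC] lines, narrow to CEPHADM_ health codes, strip each expected pattern, and treat any surviving line as a failure. The same pipeline reflowed for readability (FSID is a placeholder for this run's fsid; the alternation in the final egrep -v is an equivalent compression of the three separate -v stages in the logged command):

    FSID=e2d4b2ee-1c65-11f1-bae0-b525704df8fa
    sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/${FSID}/ceph.log \
      | egrep CEPHADM_ \
      | egrep -v '\(MDS_ALL_DOWN\)' \
      | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' \
      | egrep -v 'CEPHADM_STRAY_DAEMON|CEPHADM_FAILED_DAEMON|CEPHADM_AGENT_DOWN' \
      | head -n 1   # empty output here means the check passes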
2026-03-10T10:04:31.345 DEBUG:teuthology.orchestra.run.vm00:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T10:04:31.387 DEBUG:teuthology.orchestra.run.vm03:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T10:04:31.414 INFO:teuthology.orchestra.run.vm03.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-10T10:04:31.415 INFO:teuthology.orchestra.run.vm03.stderr:‘/var/log/rbd-target-api’: No such file or directory
2026-03-10T10:04:31.416 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-volume.log
2026-03-10T10:04:31.416 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.b.log
2026-03-10T10:04:31.419 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.cephadm.log
2026-03-10T10:04:31.429 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.b.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.audit.log
2026-03-10T10:04:31.430 INFO:teuthology.orchestra.run.vm00.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-10T10:04:31.431 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-10T10:04:31.432 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.a.log
2026-03-10T10:04:31.432 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log
2026-03-10T10:04:31.434 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.cephadm.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log
2026-03-10T10:04:31.436 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.a.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.audit.log
2026-03-10T10:04:31.441 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.audit.log: 90.7% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.cephadm.log.gz
2026-03-10T10:04:31.443 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mgr.x.log
2026-03-10T10:04:31.444 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mgr.y.log
2026-03-10T10:04:31.446 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log: 90.0% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-10T10:04:31.448 INFO:teuthology.orchestra.run.vm00.stderr: 94.1%/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.audit.log: -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log.gz
2026-03-10T10:04:31.448 INFO:teuthology.orchestra.run.vm03.stderr: 89.6%gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.4.log
2026-03-10T10:04:31.450 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.cephadm.log
2026-03-10T10:04:31.454 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mgr.x.log: -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.log.gz
2026-03-10T10:04:31.455 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.5.log
2026-03-10T10:04:31.458 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mgr.y.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-volume.log
2026-03-10T10:04:31.460 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.4.log: 93.2% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.audit.log.gz
2026-03-10T10:04:31.464 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.6.log
2026-03-10T10:04:31.465 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.cephadm.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.c.log
2026-03-10T10:04:31.465 INFO:teuthology.orchestra.run.vm00.stderr: 89.2% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-10T10:04:31.469 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.7.log
2026-03-10T10:04:31.472 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.0.log
2026-03-10T10:04:31.476 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.c.log: 94.7% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.cephadm.log.gz
2026-03-10T10:04:31.477 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.foo.vm03.smqfat.log
2026-03-10T10:04:31.481 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.1.log
2026-03-10T10:04:31.488 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.2.log
2026-03-10T10:04:31.488 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.smpl.vm03.evsibt.log
2026-03-10T10:04:31.491 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.foo.vm03.smqfat.log: 75.8% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.foo.vm03.smqfat.log.gz
2026-03-10T10:04:31.497 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.smpl.vm03.evsibt.log: 76.5% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.smpl.vm03.evsibt.log.gz
2026-03-10T10:04:31.501 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.3.log
2026-03-10T10:04:31.516 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.foo.vm00.gcwrcv.log
2026-03-10T10:04:31.530 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.smpl.vm00.tpyqjn.log
2026-03-10T10:04:31.532 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.foo.vm00.gcwrcv.log: 95.6% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph.audit.log.gz
2026-03-10T10:04:31.536 INFO:teuthology.orchestra.run.vm00.stderr: 81.9% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.foo.vm00.gcwrcv.log.gz
2026-03-10T10:04:31.543 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/tcmu-runner.log
2026-03-10T10:04:31.554 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.smpl.vm00.tpyqjn.log: 81.9% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-client.rgw.smpl.vm00.tpyqjn.log.gz
2026-03-10T10:04:31.558 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/tcmu-runner.log: 86.9% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/tcmu-runner.log.gz
2026-03-10T10:04:31.569 INFO:teuthology.orchestra.run.vm03.stderr: 92.7% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-volume.log.gz
2026-03-10T10:04:31.702 INFO:teuthology.orchestra.run.vm00.stderr: 93.2% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-volume.log.gz
2026-03-10T10:04:31.823 INFO:teuthology.orchestra.run.vm03.stderr: 92.2% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mgr.x.log.gz
2026-03-10T10:04:32.236 INFO:teuthology.orchestra.run.vm03.stderr: 92.7% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.b.log.gz
2026-03-10T10:04:33.182 INFO:teuthology.orchestra.run.vm00.stderr: 92.6% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.c.log.gz
2026-03-10T10:04:33.198 INFO:teuthology.orchestra.run.vm00.stderr: 91.7% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mgr.y.log.gz
2026-03-10T10:04:33.211 INFO:teuthology.orchestra.run.vm03.stderr: 93.8% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.6.log.gz
2026-03-10T10:04:33.613 INFO:teuthology.orchestra.run.vm03.stderr: 93.9% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.5.log.gz
2026-03-10T10:04:33.655 INFO:teuthology.orchestra.run.vm03.stderr: 94.3% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.7.log.gz
2026-03-10T10:04:33.801 INFO:teuthology.orchestra.run.vm03.stderr: 94.0% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.4.log.gz
2026-03-10T10:04:33.803 INFO:teuthology.orchestra.run.vm03.stderr:
2026-03-10T10:04:33.803 INFO:teuthology.orchestra.run.vm03.stderr:real 0m2.399s
2026-03-10T10:04:33.803 INFO:teuthology.orchestra.run.vm03.stderr:user 0m4.168s
2026-03-10T10:04:33.803 INFO:teuthology.orchestra.run.vm03.stderr:sys 0m0.246s
2026-03-10T10:04:33.872 INFO:teuthology.orchestra.run.vm00.stderr: 93.8% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.2.log.gz
2026-03-10T10:04:34.438 INFO:teuthology.orchestra.run.vm00.stderr: 94.0% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.0.log.gz
2026-03-10T10:04:34.569 INFO:teuthology.orchestra.run.vm00.stderr: 91.8% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-mon.a.log.gz
2026-03-10T10:04:34.596 INFO:teuthology.orchestra.run.vm00.stderr: 94.0% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.1.log.gz
2026-03-10T10:04:34.652 INFO:teuthology.orchestra.run.vm00.stderr: 94.0% -- replaced with /var/log/ceph/e2d4b2ee-1c65-11f1-bae0-b525704df8fa/ceph-osd.3.log.gz
2026-03-10T10:04:34.654 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T10:04:34.654 INFO:teuthology.orchestra.run.vm00.stderr:real 0m3.245s
2026-03-10T10:04:34.654 INFO:teuthology.orchestra.run.vm00.stderr:user 0m5.464s
2026-03-10T10:04:34.654 INFO:teuthology.orchestra.run.vm00.stderr:sys 0m0.270s
2026-03-10T10:04:34.654 INFO:tasks.cephadm:Archiving logs...
2026-03-10T10:04:34.654 DEBUG:teuthology.misc:Transferring archived files from vm00:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988/remote/vm00/log
2026-03-10T10:04:34.655 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T10:04:34.949 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988/remote/vm03/log
2026-03-10T10:04:34.949 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T10:04:35.172 INFO:tasks.cephadm:Removing cluster...
2026-03-10T10:04:35.173 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa --force
2026-03-10T10:04:35.378 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e2d4b2ee-1c65-11f1-bae0-b525704df8fa --force
2026-03-10T10:04:35.584 INFO:tasks.cephadm:Removing cephadm ...
2026-03-10T10:04:35.584 DEBUG:teuthology.orchestra.run.vm00:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T10:04:35.602 DEBUG:teuthology.orchestra.run.vm03:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T10:04:35.618 INFO:tasks.cephadm:Teardown complete
2026-03-10T10:04:35.618 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-10T10:04:35.620 INFO:teuthology.task.clock:Checking final clock skew...
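The scrambled-looking compression output above is expected rather than an error: xargs is invoked with --max-args=1 --max-procs=0, so it forks one gzip per log file with no cap on parallelism, and the --verbose reports from concurrent gzips interleave on the shared stderr. The command itself, reflowed for readability:

    time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty \
            -- gzip -5 --verbose --
    # -print0/-0 keep unusual filenames intact; --no-run-if-empty skips
    # gzip entirely when find matches nothing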
2026-03-10T10:04:35.620 DEBUG:teuthology.orchestra.run.vm00:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T10:04:35.644 DEBUG:teuthology.orchestra.run.vm03:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T10:04:35.658 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found
2026-03-10T10:04:35.675 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm03.stdout:^- ctb01.martinmoerch.dk 2 6 377 36 -880us[ -898us] +/- 28ms
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm03.stdout:^* static.222.16.42.77.clie> 2 6 377 35 +70us[ +51us] +/- 2619us
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm03.stdout:^- home.of.the.smiling-prox> 1 7 377 34 -3329us[-3329us] +/- 22ms
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm03.stdout:^- srv01.spectre-net.de 2 6 377 32 -1626us[-1626us] +/- 13ms
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T10:04:35.803 INFO:teuthology.orchestra.run.vm00.stdout:===============================================================================
2026-03-10T10:04:35.804 INFO:teuthology.orchestra.run.vm00.stdout:^- home.of.the.smiling-prox> 1 7 377 35 -3005us[-3023us] +/- 22ms
2026-03-10T10:04:35.804 INFO:teuthology.orchestra.run.vm00.stdout:^- ctb01.martinmoerch.dk 2 6 377 32 +1633us[+1633us] +/- 28ms
2026-03-10T10:04:35.804 INFO:teuthology.orchestra.run.vm00.stdout:^- srv01.spectre-net.de 2 6 377 36 -1451us[-1469us] +/- 13ms
2026-03-10T10:04:35.804 INFO:teuthology.orchestra.run.vm00.stdout:^* static.222.16.42.77.clie> 2 6 377 33 -147us[ -165us] +/- 2518us
2026-03-10T10:04:35.804 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-10T10:04:35.806 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-10T10:04:35.806 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-10T10:04:35.809 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-10T10:04:35.811 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-10T10:04:35.812 INFO:teuthology.task.internal:Duration was 1303.411688 seconds
2026-03-10T10:04:35.813 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-10T10:04:35.814 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-10T10:04:35.815 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T10:04:35.846 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T10:04:35.884 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T10:04:35.887 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T10:04:36.321 INFO:teuthology.task.internal.syslog:Checking logs for errors...
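The clock check probes ntpq first and falls back to chronyc when it is missing, which is what happens on these CentOS 9 nodes ('ntpq: command not found' above, followed by chronyc sources output); the trailing '|| true' keeps a host with neither tool from failing the unwind:

    PATH=/usr/bin:/usr/sbin ntpq -p \
      || PATH=/usr/bin:/usr/sbin chronyc sources \
      || true   # clock reporting is best-effort during teardown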
2026-03-10T10:04:36.321 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm00.local
2026-03-10T10:04:36.321 DEBUG:teuthology.orchestra.run.vm00:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T10:04:36.347 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm03.local
2026-03-10T10:04:36.348 DEBUG:teuthology.orchestra.run.vm03:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T10:04:36.389 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-03-10T10:04:36.389 DEBUG:teuthology.orchestra.run.vm00:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T10:04:36.390 DEBUG:teuthology.orchestra.run.vm03:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T10:04:37.187 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-10T10:04:37.187 DEBUG:teuthology.orchestra.run.vm00:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T10:04:37.188 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T10:04:37.213 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T10:04:37.214 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T10:04:37.214 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T10:04:37.214 INFO:teuthology.orchestra.run.vm00.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T10:04:37.214 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T10:04:37.214 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T10:04:37.214 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T10:04:37.215 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T10:04:37.215 INFO:teuthology.orchestra.run.vm03.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T10:04:37.215 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T10:04:37.408 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T10:04:37.454 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 96.9% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T10:04:37.489 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-10T10:04:37.524 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-10T10:04:37.524 DEBUG:teuthology.orchestra.run.vm00:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T10:04:37.553 DEBUG:teuthology.orchestra.run.vm03:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T10:04:37.579 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-10T10:04:37.693 DEBUG:teuthology.orchestra.run.vm00:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T10:04:37.694 DEBUG:teuthology.orchestra.run.vm03:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T10:04:37.719 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = core
2026-03-10T10:04:37.720 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = core
2026-03-10T10:04:37.867 DEBUG:teuthology.orchestra.run.vm00:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T10:04:37.883 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T10:04:37.883 DEBUG:teuthology.orchestra.run.vm03:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T10:04:37.921 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T10:04:37.921 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-10T10:04:37.924 INFO:teuthology.task.internal:Transferring archived files...
2026-03-10T10:04:37.925 DEBUG:teuthology.misc:Transferring archived files from vm00:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988/remote/vm00
2026-03-10T10:04:37.925 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T10:04:37.963 DEBUG:teuthology.misc:Transferring archived files from vm03:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/988/remote/vm03
2026-03-10T10:04:37.963 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T10:04:37.997 INFO:teuthology.task.internal:Removing archive directory...
2026-03-10T10:04:37.997 DEBUG:teuthology.orchestra.run.vm00:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T10:04:38.001 DEBUG:teuthology.orchestra.run.vm03:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T10:04:38.054 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-10T10:04:38.057 INFO:teuthology.task.internal:Not uploading archives.
2026-03-10T10:04:38.057 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-10T10:04:38.059 INFO:teuthology.task.internal:Tidying up after the test...
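The coredump unwind above restores the default kernel.core_pattern, prunes any cores whose binary is systemd-sysusers (evidently treated as noise rather than test failures), and removes the directory only if it is then empty; the subsequent 'test -e' returning 1 on both hosts confirms no cores were kept. The same sequence reflowed:

    sudo sysctl -w kernel.core_pattern=core
    # delete cores produced by systemd-sysusers; keep everything else
    sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do
        file $f | grep -q systemd-sysusers && rm $f || true
    done'
    rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump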
2026-03-10T10:04:38.059 DEBUG:teuthology.orchestra.run.vm00:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T10:04:38.061 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T10:04:38.075 INFO:teuthology.orchestra.run.vm00.stdout: 8532143 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 10:04 /home/ubuntu/cephtest
2026-03-10T10:04:38.113 INFO:teuthology.orchestra.run.vm03.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 10:04 /home/ubuntu/cephtest
2026-03-10T10:04:38.113 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-10T10:04:38.121 INFO:teuthology.run:Summary data:
description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic}
duration: 1303.4116880893707
owner: kyr
success: true
2026-03-10T10:04:38.121 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T10:04:38.142 INFO:teuthology.run:pass